1 Load packages

library("RSQLite")      # for reading in db files 
library("tidyjson")     # for reading in json files 
library("knitr")        # for RMarkdown 
library("lubridate")    # for dealing with dates
library("lsr")          # for wideToLong() function
library("janitor")      # for cleaning variable names 
library("DT")           # nice html tables
library("Hmisc")        # bootstrapped confidence intervals
library("broom.mixed")  # tidy regression results
library("brms")         # Bayesian mixed effects models
library("tidybayes")    # for tidying up results from brms
library("emmeans")      # for estimated marginal means 
library("ggeffects")    # for effects plots 
library("xtable")       # export table to latex
library("kableExtra")   # nice html data tables in Rmarkdown
library("corrr")        # for correlations 
library("patchwork")    # for making figure panels
library("tidyverse")    # for everything else
# set ggplot theme 
theme_set(theme_classic())

# set knitr options 
opts_chunk$set(comment = "",
               results = "hold",
               fig.show = "hold")

# suppress summarize warnings 
# (FALSE instead of F: F is an ordinary, reassignable binding)
options(dplyr.summarise.inform = FALSE)

2 Functions

# function for printing out html or latex tables 
# data:   data frame to print
# format: "html" (kable + kable_styling) or "latex" (xtable); any other
#         value silently prints nothing
# digits: number of decimal places to display
print_table = function(data, format = "html", digits = 2){
  if(format == "html"){
    data %>% 
      kable(digits = digits) %>% 
      kable_styling()
  }else if(format == "latex"){
    data %>% 
      xtable(digits = digits,
             caption = "Caption",
             label = "tab:table") %>%
      # TRUE/FALSE spelled out (T and F can be reassigned)
      print(include.rownames = FALSE,
            booktabs = TRUE,
            sanitize.colnames.function = identity,
            caption.placement = "top")
  }
}

# root mean squared error between two numeric vectors of equal length
rmse = function(x, y){
  squared_errors = (x - y)^2
  sqrt(mean(squared_errors))
}


# brms table 
# print the fixed effects of a brms fit as a formatted table
# fit:    fitted brms model object
# format: "html" or "latex", forwarded to print_table()
fun_brms_table = function(fit, format = "html"){
  fit %>% 
    tidy() %>% 
    filter(effect == "fixed") %>% 
    mutate(term = str_replace(term, fixed("(Intercept)"), "intercept")) %>% 
    select(term:conf.high) %>% 
    # "%" escaped so the column names survive latex output
    rename(`lower 95\\% HDI` = conf.low,
           `upper 95\\% HDI` = conf.high) %>% 
    print_table(format = format)
}

3 Experiment 1: Importance and surprise judgments

3.1 Read in and structure data

3.1.1 Read in data

# read in the raw data for experiment 1
df.exp1 = read.csv(file="../../data/experiment_1/experiment_1.csv",
                   stringsAsFactors = FALSE) %>% 
  clean_names() %>% 
  filter(test_i_1 != "") %>% # filter complete participants 
  # rating and timing columns come in as character; convert to numeric
  mutate(across(c(duration_in_seconds, test_i_1:x15_s_1, age_1_text), 
                as.numeric))

3.1.2 Bayesian inference model

library("greta")

# Bayesian inference model for how surprising a person's vote is.
# data: one-row data frame / list with counts n_same, n_other,
#       n_same_yes, n_other_yes (non-focal group members)
# a, b: shape parameters of the beta prior on party voting tendency
# returns: tidy tibble of posterior MCMC draws (4 chains x 1000 samples)
# NOTE(review): beta(), binomial(), distribution(), model(), mcmc() are
# greta's DSL functions (greta is attached just above), not stats::
func_surprise_model = function(data, a, b){
  data = unlist(data)
  
  # observed counts: same-party members first, other-party second
  n_party = c(data["n_same"], data["n_other"])
  n_yes = c(data["n_same_yes"], data["n_other_yes"])

  # prior
  party_same = beta(shape1 = a, shape2 = b)
  party_other = beta(shape1 = a, shape2 = b)
  # symmetric policy prior with the same concentration (a + b) as above
  policy = beta(shape1 = (a + b) / 2, shape2 = (a + b) / 2)

  # average parametrization
  # vote probability = mean of party tendency and policy attractiveness
  vote_same = (party_same + policy)/2
  vote_other = (party_other + policy)/2

  vote = c(vote_same, vote_other)

  # likelihood
  distribution(n_yes) = binomial(size = n_party, prob = vote)

  # posterior
  model = model(party_same,
                party_other,
                policy,
                vote_same,
                vote_other)

  draws = mcmc(model,
            n_samples = 1000,
            warmup = 1000,
            chains = 4) %>%
    tidy_draws()

  return(draws)
}

# define the cases of interest 
n_same = 0:4
n_other = 0:4
n_same_yes = 0:4
n_other_yes = 0:4

# all combinations of group composition and yes-votes among the non-focal
# members; groups have 4 (5-person trials) or 2 (3-person trials) members,
# and yes-counts cannot exceed group sizes
df.cases = crossing(n_same,
                    n_other,
                    n_same_yes,
                    n_other_yes) %>% 
  filter((n_same + n_other == 4) | 
        (n_same + n_other == 2),
         n_same_yes <= n_same,
         n_other_yes <= n_other) %>% 
  mutate(situation = 1:n()) %>% 
  select(situation, everything())

# run the model
# fit the Bayesian surprise model to every situation (slow; results are
# saved to disk and reloaded in the next section)
df.predictions = df.cases %>% 
  group_by(situation) %>%
  nest() %>%
  mutate(samples = map(.x = data,
                       .f = ~ func_surprise_model(.x, a = 10, b = 5)))

3.1.3 Read in model predictions

# load cases
load(file = "data/cases.RData")

# load predictions
load(file = "data/bayesian_surprise_model_a_10_b_5.RData")

# restructure the predictions
# posterior means per situation, joined back to the case definitions
df.predictions = df.predictions %>% 
  unnest(samples) %>% 
  group_by(situation) %>% 
  select(situation, party_other:party_same) %>% 
  summarize_all(mean) %>% 
  left_join(df.cases,
            by = "situation")

3.1.4 Demographic data

# demographic summary: completion time (minutes), age, and gender counts
df.exp1.demographics = df.exp1 %>% 
  select(response_id, duration_in_seconds, importance:ethnicity) %>% 
  select(-c(age, race)) %>% # drop duplicates; keep the free-text versions
  rename(age = age_1_text,
         race = race_1_text) %>% 
  summarize(time_mean = round(mean(duration_in_seconds/60), 2),
            time_sd = round(sd(duration_in_seconds/60), 2),
            age_mean = round(mean(age)),
            age_sd = round(sd(age)),
            n_female = sum(gender == "Female"),
            n_male = sum(gender == "Male"))

df.exp1.demographics %>% 
  print_table()
time_mean time_sd age_mean age_sd n_female n_male
13.67 9.29 35 11 10 30

3.1.5 Participant feedback

# show participants' free-form feedback in an interactive html table
df.exp1 %>% 
  select(importance, surprise) %>% 
  datatable()

3.1.6 Main data

# reshape ratings to long format: one row per participant x trial x question
# (column names like x3_i_1 encode trial number and question i/s)
df.exp1.long = df.exp1 %>% 
  select(participant = response_id, x1_i_1:x15_s_1) %>% 
  mutate(participant = 1:nrow(.)) %>% # replace ids with row numbers
  gather("index", "rating", -participant) %>% 
  mutate(index = str_replace_all(index, "_1", "")) %>% 
  separate(index, into = c("trial", "question")) %>% 
  mutate(trial = str_replace_all(trial, "x", ""),
         trial = as.numeric(trial),
         question = factor(question,
                           levels = c("i", "s"),
                           labels = c("importance", "surprise"))) %>% 
  arrange(participant, trial, question)

3.2 Predictions

  • read in trialinfo and append model predictions
# write predictions for the cases in experiment 1
# trial information: party (p1-p5) and vote (v1-v5) of each group member
df.exp1.trialinfo = read.csv(file = "data/experiment1_trialinfo.csv",
                             fileEncoding = "UTF-8-BOM") %>% 
  clean_names() %>% 
  rename(rule = threshold,
         support = supporting_party,
         trial = qualtrics_no) %>% 
  mutate(n_same = rowSums(select(., p1:p5)),
         n_other = 5 - n_same)

# one row per trial x person with pivotality / normality predictors
df.exp1.predictions = df.exp1.trialinfo %>% 
  select(trial:outcome) %>% 
  gather("index", "value", p1:v5) %>% 
  separate(index, into = c("attribute", "person"), sep = -1) %>% 
  spread(attribute, value) %>% 
  rename(party = p,
         vote = v) %>% 
  group_by(trial) %>% 
  # pivotality: 0 for people who voted against the outcome, otherwise the
  # inverse distance of the vote count from the decision threshold
  mutate(pivotality = ifelse(vote != outcome, 0, 
                             ifelse(outcome == 0, 
                                    1 / (abs(sum(vote) - rule)),
                                    1 / (abs(sum(vote) - rule) + 1))),
         # number of people who voted the same way as this person
         n_causes = ifelse(vote == 1,
                                 sum(vote),
                                 sum(1-vote))) %>%
  ungroup() %>% 
  # normal disposition: person voted in line with their own party
  mutate(norm_disposition = ifelse(party == vote, 1, 0))
# summarize the non-focal group members' votes per trial, attach the
# Bayesian model's posterior mean vote probabilities, and compute the
# model's predicted surprise about the focal person's vote
df.exp1.model = df.exp1.predictions %>% 
  select(trial, focus, person, party, vote) %>% 
  group_by(trial) %>% 
  filter(person != focus) %>% # only the non-focal group members
  summarize(n_same = sum(party == 1),
            n_other = sum(party == 0),
            n_same_yes = sum(party == 1 & vote == 1),
            n_other_yes = sum(party == 0 & vote == 1)
            ) %>% 
  ungroup() %>% 
  left_join(df.predictions %>% 
              select(-situation),
            by = c("n_same",
                   "n_other",
                   "n_same_yes",
                   "n_other_yes")) %>% 
  left_join(df.exp1.predictions %>%
              filter(focus == person), # add the focal person's party/vote
            by = "trial") %>% 
  # surprise = posterior probability of the vote the focal person did NOT
  # cast; case_when replaces the original NA seed + four chained ifelse()
  # calls (unmatched rows still yield NA, case_when's default)
  mutate(surprise = case_when(
    party == 1 & vote == 1 ~ 1 - vote_same,
    party == 1 & vote == 0 ~ vote_same,
    party == 0 & vote == 1 ~ 1 - vote_other,
    party == 0 & vote == 0 ~ vote_other))

Table with the list of cases:

# print the trial list (drop the bookkeeping column)
df.exp1.trialinfo %>% 
  select(-original_experiment)
   trial    support focus p1 p2 p3 p4 p5 v1 v2 v3 v4 v5 sum rule outcome
1      1 Republican     2  1  1  1  0  0  1  1  1  1  1   5    5       1
2      2 Democratic     3  1  1  1  1  1  1  1  1  1  1   5    1       1
3      3 Republican     1  0  0  0  0  0  1  1  0  0  0   2    1       1
4      4 Republican     1  0  0  0  0  0  1  0  0  0  0   1    1       1
5      5 Republican     2  1  1  1  1  1  1  1  1  1  1   5    5       1
6      6 Democratic     2  1  1  0  0  0  1  0  1  0  0   2    5       0
7      7 Republican     4  0  0  0  0  0  1  0  0  0  0   1    5       0
8      8 Democratic     3  1  1  1  0  0  1  1  0  0  0   2    4       0
9      9 Republican     1  1  1  1  0  0  1  1  1  1  1   5    1       1
10    10 Democratic     3  1  1  1  0  0  1  1  0  0  0   2    3       0
11    11 Republican     5  1  0  0  0  0  0  0  0  0  0   0    1       0
12    12 Democratic     1  1  1  0  0  0  1  1  0  0  0   2    2       1
13    13 Republican     2  1  1  1  1  0  1  1  1  0  1   4    4       1
14    14 Democratic     4  1  0  0  0  0  1  1  1  0  0   3    5       0
15    15 Republican     3  0  0  0  0  0  0  0  0  0  0   0    2       0
16    16 Democratic     2  1  0  0  0  0  1  1  0  0  0   2    2       1
17    17 Republican     4  1  1  1  0  0  1  1  1  1  0   4    4       1
18    18 Democratic     2  1  1  0  0  0  0  0  0  0  0   0    5       0
19    19 Republican     2  0  0  0  0  0  0  0  0  0  0   0    3       0
20    20 Democratic     2  1  1  0  0  0  1  1  1  1  0   4    3       1
21    21 Republican     3  0  0  0  0  0  1  0  0  0  0   1    4       0
22    22 Democratic     4  0  0  0  0  0  1  0  0  0  0   1    2       0
23    23 Democratic     5  1  0  0  0  0  1  0  0  0  0   1    3       0
24    24 Democratic     4  1  1  1  1  0  1  1  0  0  0   2    4       0
25    25 Democratic     5  1  0  0  0  0  0  1  1  1  1   4    1       1
26    26 Republican     5  1  1  0  0  0  0  0  1  1  1   3    2       1
27    27 Democratic     5  0  0  0  0  0  1  1  1  1  1   5    2       1
   trial_exp2 person n_same n_other
1         162      1      3       2
2          58      1      5       0
3          33      1      0       5
4          32      1      0       5
5         170      1      5       0
6         154      2      2       3
7         144      2      0       5
8         130      3      3       2
9          50      1      3       2
10        102      3      3       2
11         34      2      1       4
12         69      1      2       3
13        138      1      4       1
14        150      4      1       4
15         59      1      0       5
16         65      2      1       4
17        133      4      3       2
18        151      1      2       3
19         87      1      0       5
20        100      1      2       3
21        116      2      0       5
22         60      2      0       5
23         91      2      1       4
24        135      3      4       1
25         NA      5      1       4
26         NA      5      2       3
27         NA      5      0       5

3.3 Stats

3.3.1 Bayesian mixed effects models

# mean ratings with bootstrapped 95% CIs per trial and question,
# joined with the model predictors
df.exp1.regression = df.exp1.long %>% 
  group_by(trial, question) %>% 
  summarize(rating_mean = mean(rating),
            rating_low = smean.cl.boot(rating)[2],
            rating_high = smean.cl.boot(rating)[3]) %>% 
  ungroup() %>% 
  left_join(df.exp1.model,
            by = "trial")

3.3.1.1 Surprise

3.3.1.1.1 Bayesian surprise model
# surprise ratings with the Bayesian model's surprise predictor
df.data = df.exp1.long %>% 
  filter(question == "surprise") %>% 
  left_join(df.exp1.model %>% 
              select(trial, surprise),
            by = "trial")

# mixed effects regression of ratings on the Bayesian surprise predictor
# (fit is cached to file and reloaded on subsequent runs)
fit_brm_exp1_surprise_bayesian = brm(
  formula = rating ~ 1 + surprise + (1 + surprise | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  control = list(adapt_delta = 0.99), # reduce divergent transitions
  file = "cache/fit_brm_exp1_surprise_bayesian_a_10_b_5")
3.3.1.1.2 Dispositional normality
# surprise ratings with the dispositional-normality predictor
df.data = df.exp1.long %>% 
  filter(question == "surprise") %>% 
  left_join(df.exp1.model %>% 
              select(trial, norm_disposition, norm_situation = n_causes),
            by = "trial")

# alternative model: surprise ratings ~ dispositional normality (cached)
fit_brm_exp1_surprise_disp = brm(
  formula = rating ~ 1 + norm_disposition + (1 + norm_disposition | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp1_surprise_disp")
3.3.1.1.3 Model comparison
3.3.1.1.3.1 r and RMSE
# compute correlation (r) and RMSE between a brms model's population-level
# predictions and the mean participant ratings
# fit:           fitted brms model object
# question_type: "surprise" or "importance" (filters the rating data)
# data:          data frame of mean ratings; defaults to the global
#                df.exp1.regression (made an explicit argument so the
#                hidden global dependency is visible and overridable)
func_model_evaluation = function(fit, question_type,
                                 data = df.exp1.regression){
  data %>% 
    filter(question == question_type) %>% 
    fitted(newdata = .,
           object = fit,
           re_formula = NA) %>% # population-level predictions only
    as_tibble() %>% 
    clean_names() %>% 
    bind_cols(data %>% 
                filter(question == question_type)) %>% 
    summarize(r = cor(estimate, rating_mean),
              rmse = rmse(estimate, rating_mean)) %>% 
    round(2)
}

# compare fit (r and RMSE) of the two surprise models

# bayesian surprise
func_model_evaluation(fit = fit_brm_exp1_surprise_bayesian,
                      question_type = "surprise")

# dispositional
func_model_evaluation(fit = fit_brm_exp1_surprise_disp,
                      question_type = "surprise")
# A tibble: 1 x 2
      r  rmse
  <dbl> <dbl>
1  0.98  4.59
# A tibble: 1 x 2
      r  rmse
  <dbl> <dbl>
1  0.95  7.66
3.3.1.1.3.2 Model coefficients
# fixed-effect coefficients (with 95% HPD intervals) for both surprise models
tibble(model = c("fit_brm_exp1_surprise_bayesian",
                 "fit_brm_exp1_surprise_disp")) %>% 
  mutate(fit = map(model, get)) %>% # look up each fit object by name
  mutate(tidy = map(fit, ~ tidy(.,
                                effects = "fixed",
                                conf.method = "HPDinterval"))) %>% 
  unnest(tidy) %>% 
  select(-c(fit, effect, component)) %>% 
  print_table()
model term estimate std.error conf.low conf.high
fit_brm_exp1_surprise_bayesian (Intercept) -76.41 11.83 -97.94 -51.59
fit_brm_exp1_surprise_bayesian surprise 244.60 22.99 198.96 288.62
fit_brm_exp1_surprise_disp (Intercept) 65.96 3.11 60.21 72.13
fit_brm_exp1_surprise_disp norm_disposition -46.95 5.24 -56.61 -36.33
3.3.1.1.3.3 loo
# add leave-one-out and WAIC criteria to both fits
# (reloo = TRUE refits the model for observations with unstable estimates)
fit_brm_exp1_surprise_bayesian = add_criterion(fit_brm_exp1_surprise_bayesian,
                                               criterion = c("loo", "waic"),
                                               reloo = TRUE)

fit_brm_exp1_surprise_disp = add_criterion(fit_brm_exp1_surprise_disp,
                                           criterion = c("loo", "waic"),
                                           reloo = TRUE)

# compare predictive performance (best model is listed first)
loo_compare(fit_brm_exp1_surprise_bayesian,
            fit_brm_exp1_surprise_disp)

3.3.1.2 Importance

3.3.1.2.1 Causal attribution model
# importance ratings with pivotality and number-of-causes predictors
df.data = df.exp1.long %>% 
  filter(question == "importance") %>% 
  left_join(df.exp1.model %>% 
              select(trial, pivotality, n_causes),
            by = "trial")

# full model: both pivotality and number of causes (cached to file)
fit_brm_exp1_importance_bayesian = brm(
  formula = rating ~ 1 + pivotality + n_causes + (1 + pivotality + n_causes | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  control = list(adapt_delta = 0.99),
  file = "cache/fit_brm_exp1_importance_bayesian")
3.3.1.2.2 Pivotality only
# same predictor data as above (recreated so the chunk runs standalone)
df.data = df.exp1.long %>% 
  filter(question == "importance") %>% 
  left_join(df.exp1.model %>% 
              select(trial, pivotality, n_causes),
            by = "trial")

# reduced model: pivotality only (cached to file)
fit_brm_exp1_importance_pivotality = brm(
  formula = rating ~ 1 + pivotality + (1 + pivotality | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  control = list(adapt_delta = 0.99),
  file = "cache/fit_brm_exp1_importance_pivotality")
3.3.1.2.3 Number of causes only
# same predictor data as above (recreated so the chunk runs standalone)
df.data = df.exp1.long %>% 
  filter(question == "importance") %>% 
  left_join(df.exp1.model %>% 
              select(trial, pivotality, n_causes),
            by = "trial")

# reduced model: number of causes only (cached to file)
fit_brm_exp1_importance_contribution = brm(
  formula = rating ~ 1 + n_causes + (1 + n_causes | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  control = list(adapt_delta = 0.99),
  file = "cache/fit_brm_exp1_importance_contribution")
3.3.1.2.4 Model comparison
3.3.1.2.4.1 r and rmse
# compare fit (r and RMSE) of the three importance models

# causal attribution model 
func_model_evaluation(fit = fit_brm_exp1_importance_bayesian,
                      question_type = "importance")

# pivotality model 
func_model_evaluation(fit = fit_brm_exp1_importance_pivotality,
                      question_type = "importance")
         
# number of causes model 
func_model_evaluation(fit = fit_brm_exp1_importance_contribution,
                      question_type = "importance")
# A tibble: 1 x 2
      r  rmse
  <dbl> <dbl>
1  0.92  7.95
# A tibble: 1 x 2
      r  rmse
  <dbl> <dbl>
1  0.88  10.1
# A tibble: 1 x 2
      r  rmse
  <dbl> <dbl>
1  0.54  17.5
3.3.1.2.4.2 Model coefficients
  # fixed-effect coefficients (95% HPD intervals) for all importance models
  tibble(model = c("fit_brm_exp1_importance_bayesian",
                   "fit_brm_exp1_importance_pivotality",
                   "fit_brm_exp1_importance_contribution")) %>% 
  mutate(fit = map(model, get)) %>% # look up each fit object by name
  mutate(tidy = map(fit, ~ tidy(.,
                                effects = "fixed",
                                conf.method = "HPDinterval"))) %>% 
  unnest(tidy) %>% 
  select(-c(fit, effect, component)) %>% 
  print_table()
model term estimate std.error conf.low conf.high
fit_brm_exp1_importance_bayesian (Intercept) 43.66 4.23 35.35 51.79
fit_brm_exp1_importance_bayesian pivotality 51.08 4.95 41.37 60.87
fit_brm_exp1_importance_bayesian n_causes -5.49 0.71 -6.98 -4.22
fit_brm_exp1_importance_pivotality (Intercept) 19.28 3.69 11.94 26.37
fit_brm_exp1_importance_pivotality pivotality 57.26 5.06 48.05 67.87
fit_brm_exp1_importance_contribution (Intercept) 89.60 3.20 83.10 95.53
fit_brm_exp1_importance_contribution n_causes -9.51 0.84 -11.18 -7.89
3.3.1.2.4.3 loo
# add leave-one-out and WAIC criteria to all three fits
# (reloo = TRUE refits the model for observations with unstable estimates)
fit_brm_exp1_importance_bayesian = add_criterion(fit_brm_exp1_importance_bayesian,
                                                 criterion = c("loo", "waic"),
                                                 reloo = TRUE)

fit_brm_exp1_importance_pivotality = add_criterion(fit_brm_exp1_importance_pivotality,
                                                   criterion = c("loo", "waic"),
                                                   reloo = TRUE)

fit_brm_exp1_importance_contribution = add_criterion(fit_brm_exp1_importance_contribution,
                                                     criterion = c("loo", "waic"),
                                                     reloo = TRUE)

# compare the full model against each single-predictor model
loo_compare(fit_brm_exp1_importance_bayesian,
            fit_brm_exp1_importance_pivotality)

loo_compare(fit_brm_exp1_importance_bayesian,
            fit_brm_exp1_importance_contribution)
                                   elpd_diff se_diff
fit_brm_exp1_importance_bayesian     0.0       0.0  
fit_brm_exp1_importance_pivotality -37.4       8.8  
                                     elpd_diff se_diff
fit_brm_exp1_importance_bayesian        0.0       0.0 
fit_brm_exp1_importance_contribution -233.3      23.7 

3.3.2 Specific hypotheses

3.3.2.1 Surprise

# posterior fitted draws per trial, spread so each trial is one column
# (used below to compute posterior distributions of pairwise differences)
df.exp1.posterior_surprise_fitted = df.exp1.regression %>% 
  select(trial,
         question,
         rating = rating_mean,
         surprise) %>% 
  filter(question == "surprise") %>% 
  add_fitted_draws(newdata = .,
                   model = fit_brm_exp1_surprise_bayesian,
                   re_formula = NA) %>% 
  ungroup() %>% 
  select(trial, .value, .draw) %>% 
  spread(trial, .value)

# print mean and 95% HDI of the posterior difference between two trials
# data:   wide draws data frame (one column per trial, as produced above)
# trial1, trial2: trial numbers as strings (column names); the difference
#         computed is trial1 - trial2
func_posterior_difference = function(data, trial1, trial2){
  data %>% 
    mutate(difference = .data[[trial1]] - .data[[trial2]]) %>% 
    pull(difference) %>% 
    mean_hdci() %>% 
    mutate(across(where(is.numeric), ~round(., 2))) %>% 
    summarize(difference = str_c("(", y, " [", ymin, ", ", ymax, "])")) %>% 
    print()  
}

# posterior difference in predicted surprise: trial 4 vs. trial 3
func_posterior_difference(data = df.exp1.posterior_surprise_fitted,
                          trial1 = "4",
                          trial2 = "3")
            difference
1 (9.63 [7.83, 11.36])

3.3.2.2 Importance

# posterior fitted draws for importance, one column per trial
df.exp1.posterior_importance_fitted = df.exp1.regression %>% 
  select(trial,
         question,
         rating_mean,
         pivotality,
         n_causes) %>% 
  spread(question, rating_mean) %>% 
  add_fitted_draws(newdata = .,
                   model = fit_brm_exp1_importance_bayesian,
                   re_formula = NA) %>% 
  ungroup() %>% 
  select(trial, .value, .draw) %>% 
  spread(trial, .value)


# 3 vs. 2 
func_posterior_difference(data = df.exp1.posterior_importance_fitted,
                          trial1 = "3",
                          trial2 = "2")

# 1 and 4 vs. 3 
# average of trials 1 and 4 compared against trial 3
df.exp1.posterior_importance_fitted %>% 
  mutate(difference = (`1` + `4`)/2 - `3`) %>% 
  pull(difference) %>% 
  mean_hdci() %>% 
  mutate(across(where(is.numeric), ~round(., 2))) %>% 
  summarize(difference = str_c("(", y, " [", ymin, ", ", ymax, "])")) %>% 
  print()

# 4 vs. 1 
func_posterior_difference(data = df.exp1.posterior_importance_fitted,
                          trial1 = "4",
                          trial2 = "1")
              difference
1 (31.79 [26.76, 36.83])
              difference
1 (20.05 [15.02, 25.15])
              difference
1 (21.96 [16.89, 27.93])

3.4 Tables

3.4.1 Trial information

# trial information table (html; latex export left commented out)
df.exp1.trialinfo %>% 
  select(trial, person, p1:p5, v1:v5, threshold = rule, outcome) %>% 
  # xtable() %>%
  # print(include.rownames = FALSE)
  kable() %>% 
  kable_styling(fixed_thead = TRUE, # TRUE instead of reassignable T
                bootstrap_options = "striped")
trial person p1 p2 p3 p4 p5 v1 v2 v3 v4 v5 threshold outcome
1 1 1 1 1 0 0 1 1 1 1 1 5 1
2 1 1 1 1 1 1 1 1 1 1 1 1 1
3 1 0 0 0 0 0 1 1 0 0 0 1 1
4 1 0 0 0 0 0 1 0 0 0 0 1 1
5 1 1 1 1 1 1 1 1 1 1 1 5 1
6 2 1 1 0 0 0 1 0 1 0 0 5 0
7 2 0 0 0 0 0 1 0 0 0 0 5 0
8 3 1 1 1 0 0 1 1 0 0 0 4 0
9 1 1 1 1 0 0 1 1 1 1 1 1 1
10 3 1 1 1 0 0 1 1 0 0 0 3 0
11 2 1 0 0 0 0 0 0 0 0 0 1 0
12 1 1 1 0 0 0 1 1 0 0 0 2 1
13 1 1 1 1 1 0 1 1 1 0 1 4 1
14 4 1 0 0 0 0 1 1 1 0 0 5 0
15 1 0 0 0 0 0 0 0 0 0 0 2 0
16 2 1 0 0 0 0 1 1 0 0 0 2 1
17 4 1 1 1 0 0 1 1 1 1 0 4 1
18 1 1 1 0 0 0 0 0 0 0 0 5 0
19 1 0 0 0 0 0 0 0 0 0 0 3 0
20 1 1 1 0 0 0 1 1 1 1 0 3 1
21 2 0 0 0 0 0 1 0 0 0 0 4 0
22 2 0 0 0 0 0 1 0 0 0 0 2 0
23 2 1 0 0 0 0 1 0 0 0 0 3 0
24 3 1 1 1 1 0 1 1 0 0 0 4 0
25 5 1 0 0 0 0 0 1 1 1 1 1 1
26 5 1 1 0 0 0 0 0 1 1 1 2 1
27 5 0 0 0 0 0 1 1 1 1 1 2 1

3.4.2 Posterior inferences

# regression weights that map model surprise onto the 0-100 rating scale
parameters = fit_brm_exp1_surprise_bayesian %>% 
  fixef() %>% 
  as_tibble() %>%
  pull(Estimate)

# table of posterior inferences, restricted to situations in which at most
# half of the observed votes were unusual
df.predictions %>% 
  rowwise() %>%
  mutate(unusual = sum(n_same - n_same_yes) + sum(n_other_yes)) %>% 
  ungroup() %>% 
  filter((unusual / (n_same + n_other)) <= .5) %>% 
  mutate(trial = 1:n()) %>% 
  select(-unusual) %>%
  # rescale posterior surprise through the regression line onto ratings
  mutate(surprise_same_yes = parameters[1] + parameters[2] * (1 - vote_same),
         surprise_other_yes = parameters[1] + parameters[2] * (1 - vote_other)) %>% 
  mutate(across(.cols = contains("n_"),
                .fns = ~ as.character(.))) %>%
  mutate(across(.cols = c(contains("party"),
                  contains("vote"),
                  contains("surprise"),
                  policy),
                .fns = ~ round(., 2))) %>% 
  select(trial = situation,
         n_same, n_same_yes, party_same, vote_same, surprise_same_yes, 
         n_other, n_other_yes, party_other, vote_other, surprise_other_yes,
         policy) %>% 
  # xtable() %>%
  # print(include.rownames = FALSE)
  kable() %>% 
  kable_styling(bootstrap_options = "striped") %>% 
  scroll_box(width = "100%")
trial n_same n_same_yes party_same vote_same surprise_same_yes n_other n_other_yes party_other vote_other surprise_other_yes policy
1 0 0 0.67 0.57 28.78 2 0 0.31 0.39 72.88 0.47
2 0 0 0.66 0.59 24.67 2 1 0.34 0.42 64.43 0.51
4 0 0 0.66 0.55 33.36 4 0 0.29 0.36 79.55 0.44
5 0 0 0.67 0.58 27.31 4 1 0.32 0.40 69.92 0.49
6 0 0 0.66 0.59 24.70 4 2 0.34 0.43 63.64 0.51
9 1 0 0.65 0.56 32.10 1 0 0.33 0.39 71.64 0.46
11 1 1 0.68 0.59 24.08 1 0 0.32 0.41 67.87 0.50
12 1 1 0.68 0.60 20.63 1 1 0.35 0.44 59.95 0.53
13 1 0 0.65 0.55 34.28 3 0 0.31 0.38 76.02 0.45
14 1 0 0.65 0.56 30.61 3 1 0.32 0.40 70.94 0.47
17 1 1 0.68 0.57 27.59 3 0 0.30 0.39 73.92 0.47
18 1 1 0.68 0.59 23.48 3 1 0.32 0.41 67.11 0.50
19 1 1 0.67 0.60 20.29 3 2 0.35 0.44 59.67 0.53
22 2 1 0.66 0.58 26.37 0 0 0.33 0.42 66.61 0.50
23 2 2 0.69 0.61 19.77 0 0 0.34 0.43 62.40 0.52
24 2 0 0.64 0.54 36.10 2 0 0.31 0.38 76.44 0.44
27 2 1 0.66 0.57 29.43 2 0 0.30 0.39 73.36 0.47
28 2 1 0.66 0.58 26.28 2 1 0.34 0.42 65.67 0.50
30 2 2 0.69 0.60 21.99 2 0 0.31 0.41 68.40 0.50
31 2 2 0.69 0.61 18.15 2 1 0.34 0.43 61.86 0.53
32 2 2 0.69 0.62 15.61 2 2 0.36 0.46 55.81 0.56
35 3 1 0.65 0.55 32.99 1 0 0.33 0.39 71.78 0.46
37 3 2 0.68 0.59 24.40 1 0 0.33 0.41 67.02 0.50
38 3 2 0.67 0.60 20.93 1 1 0.36 0.44 59.77 0.53
39 3 3 0.70 0.61 18.41 1 0 0.32 0.42 65.13 0.53
40 3 3 0.70 0.63 14.58 1 1 0.35 0.46 56.87 0.56
43 4 2 0.65 0.57 28.09 0 0 0.34 0.41 67.05 0.49
44 4 3 0.69 0.60 20.75 0 0 0.34 0.43 63.15 0.52
45 4 4 0.71 0.63 14.68 0 0 0.34 0.44 60.01 0.55

3.5 Plots

3.5.1 Generative model

# plot a beta density with the given shape parameters on [0, 1]
# (figure saving is disabled; the ggplot object is returned for display)
func_draw_beta = function(shape1, shape2){
  ggplot(data = tibble(x = c(0, 1)),
         mapping = aes(x = x)) +
    stat_function(fun = "dbeta",
                  args = list(shape1 = shape1,
                              shape2 = shape2),
                  size = 3) +
    scale_x_continuous(breaks = seq(0, 1, 0.25),
                       labels = seq(0, 1, 0.25),
                       limits = c(0, 1)) +
    scale_y_continuous(breaks = 0:4,
                       labels = 0:4,
                       limits = c(0, 4)) +
    theme(text = element_text(size = 36))
  # ggsave(str_c("../../figures/plots/beta_",shape1,"_",shape2, ".pdf"),
  #        width = 8,
  #        height = 6)
}

# plot the density of simulated voting probabilities
# data:  numeric vector of samples on [0, 1]
# party: only used in the commented-out ggsave filename
#        (NOTE(review): unused while figure saving is disabled)
func_draw_beta_vote = function(data, party){
  ggplot(data = tibble(x = data),
         mapping = aes(x = x)) +
    stat_density(size = 3,
                 bw = 0.05,
                 geom = "line") +
    scale_x_continuous(breaks = seq(0, 1, 0.25),
                       labels = seq(0, 1, 0.25),
                       limits = c(0, 1)) +
    scale_y_continuous(breaks = 0:4,
                       labels = 0:4,
                       limits = c(0, 4)) +
    theme(text = element_text(size = 36))
  
  # ggsave(str_c("../../figures/plots/beta_", party, ".pdf"),
  #          width = 8,
  #          height = 6)
}

# prior distributions 
func_draw_beta(shape1 = 10, shape2 = 5)
func_draw_beta(shape1 = 5, shape2 = 10)
func_draw_beta(shape1 = 7.5, shape2 = 7.5)

# voting distributions 
# simulate vote probabilities as the average of party and policy samples
tmp1 = rbeta(100000, shape1 = 10, shape2 = 5)
tmp2 = rbeta(100000, shape1 = 7.5, shape2 = 7.5)
tmp3 = (tmp1 + tmp2) / 2

tmp4 = rbeta(100000, shape1 = 5, shape2 = 10)
tmp5 = (tmp4 + tmp2) / 2

func_draw_beta_vote(data = tmp3, party = "same")
func_draw_beta_vote(data = tmp5, party = "other")

3.5.2 Means for selection

# \u21e6 = left arrow
# \u2713 = check mark 
# \u2717 = cross mark 
# \n = line break

# x-axis labels: threshold plus each member's party (S/O) and vote
x_labels = 
  c("T=5\nS \u2713\u21e6\nS \u2713\nS \u2713\nO \u2713\nO \u2713",
    "T=1\nS \u2713\u21e6\nS \u2713\nS \u2713\nS \u2713\nS \u2713",
    "T=1\nO \u2713\u21e6\nO \u2713\nO \u2717\nO \u2717\nO \u2717",
    "T=1\nO \u2713\u21e6\nO \u2717\nO \u2717\nO \u2717\nO \u2717")

# participant ratings for the first four trials only
df.plot = df.exp1.long %>% 
  filter(trial < 5) %>% 
  mutate(question = factor(question, levels = c("surprise", "importance")))

# get predictions for the surprise and importance means 
# population-level fitted values (with 95% CIs) for the first four trials,
# reshaped to long format matching df.plot
df.plot.model = fit_brm_exp1_importance_bayesian %>% 
  fitted(newdata = df.exp1.regression %>% 
           filter(question == "importance",
                  trial <= 4) %>% 
           select(rating = rating_mean,
                  pivotality,
                  n_causes),
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>% 
  mutate(trial = 1:n()) %>% 
  rename(importance_mean = estimate, 
         importance_low = q2_5,
         importance_high = q97_5) %>% 
  # join in the surprise model's predictions for the same trials
  left_join(fit_brm_exp1_surprise_bayesian %>% 
              fitted(newdata = df.exp1.regression %>% 
                       filter(question == "surprise",
                              trial <= 4) %>% 
                       select(rating = rating_mean,
                              surprise),
                     re_formula = NA) %>% 
              as_tibble() %>% 
              clean_names() %>% 
              mutate(trial = 1:n()) %>% 
              rename(surprise_mean = estimate, 
                     surprise_low = q2_5,
                     surprise_high = q97_5),
            by = "trial") %>% 
  select(trial, everything(), -contains("error")) %>% 
  gather(key = "key", value = "value", -trial) %>% 
  separate(key, into = c("question", "index")) %>% 
  spread(index, value) %>% 
  rename(rating = mean) %>% 
  mutate(question = factor(question, levels = c("surprise", "importance")))
  
# bar plot of mean ratings with bootstrapped CIs; dots show model predictions
ggplot(data = df.plot,
       mapping = aes(x = trial,
                     y = rating,
                     group = question,
                     fill = question)) + 
  stat_summary(fun = "mean",
               geom = "bar",
               size = 1,
               # shape = 21, 
               color = "black",
               position = position_dodge(width = 0.9)) + 
  stat_summary(fun.data = "mean_cl_boot",
               geom = "linerange",
               size = 1,
               position = position_dodge(width = 0.9)) + 
  # model predictions as overlaid dots
  geom_point(data = df.plot.model,
             shape = 21,
             size = 4,
             alpha = 0.8,
             position = position_dodge(width = 0.9),
             show.legend = F) + 
  # situation numbers above the bars
  annotate(geom = "text",
           x = 1:4,
           y = 97,
           label = 1:4,
           hjust = 0.5,
           size = 6) + 
  labs(x = "situation",
       y = "mean rating",
       caption = "T = threshold, S = same party, O = other party, \u21e6 = focus, \u2713 = yes, \u2717 = no") + 
  scale_fill_brewer(palette = "Set1") + 
  scale_x_continuous(breaks = c(1:4)-0.15,
                     labels = x_labels) +
  coord_cartesian(ylim = c(0, 101),
                  xlim = c(0.5, 4.5),
                  expand = F) +
  theme(text = element_text(size = 20),
        axis.text.x = element_text(family = "Arial Unicode MS",
                                   hjust = 0),
        axis.title.x = element_blank(),
        axis.ticks.x = element_blank(),
        legend.title = element_blank(),
        legend.text = element_text(margin = margin(l = 0.2, unit = "cm")),
        plot.caption = element_text(family = "Arial Unicode MS",
                                    hjust = 1,
                                    margin = margin(t = 0.5, unit = "cm"),
                                    size = 12))

# ggsave(file = "../../figures/plots/experiment1_bars.pdf",
#        width = 8,
#        height = 5,
#        device = cairo_pdf)

3.5.3 Scatterplots

3.5.3.1 Surprise

# model predictions joined with mean surprise ratings for the scatterplot
df.plot = fit_brm_exp1_surprise_bayesian %>%
  fitted(newdata = df.exp1.regression %>% 
           filter(question == "surprise"),
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp1.regression %>%
              filter(question == "surprise")) %>%
  mutate(color = ifelse(trial <= 4, 1, 0), # highlight the four focus trials
         color = as.factor(color)) %>% 
  arrange(desc(trial)) %>% 
  rename(rating = rating_mean)


# column names used via tidy evaluation in the plot below
x = "estimate"
y = "rating"

# scatterplot of model predictions vs. mean surprise ratings
ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]],
                     color = color,
                     label = trial)) +
  # identity line: perfect prediction
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # regression line with 95% interval from the brms fit
  geom_smooth(mapping = aes(y = .data[[x]],
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # bootstrapped CIs on the participant means
  # (.data[[...]] replaces the deprecated aes_string())
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.2) +
  geom_point(size = 2) +
  # label the four focus trials
  geom_text(data = df.plot %>% filter(trial <= 4),
            nudge_x = 4,
            nudge_y = -1,
            size = 6) +
  # print r and RMSE in the top-left corner
  annotate(geom = "text",
           label = df.plot %>% 
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>% 
             mutate(across(.fns = ~ round(., 2))) %>% 
             unlist() %>% 
             str_c(names(.), " = ", .),
           x = c(0, 0), 
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  labs(x = "dispositional inference model",
       y = "mean surprise rating") +
  # guide = "none" replaces the deprecated guide = F
  scale_color_manual(values = c("black", "#e41a1c"),
                     guide = "none") +
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24),
        axis.title.x = element_text(size = 22))

# ggsave(file = "../../figures/plots/experiment1_surprise_scatter.pdf",
#        width = 5,
#        height = 5)

3.5.3.2 Importance

# population-level model predictions for the importance trials,
# joined with the empirical data for plotting
df.plot = fitted(object = fit_brm_exp1_importance_bayesian,
                 newdata = df.exp1.regression %>% 
                   filter(question == "importance"),
                 re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>% 
  bind_cols(df.exp1.regression %>% 
              filter(question == "importance")) %>% 
  # flag the first four trials (highlighted in the plot below)
  mutate(color = factor(ifelse(trial <= 4, 1, 0))) %>% 
  arrange(desc(trial)) %>% 
  rename(rating = rating_mean)


# column names for the x- and y-axis (used programmatically below)
x = "estimate"
y = "rating"

# scatterplot: model predictions vs. mean importance ratings
ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]],
                     color = color,
                     label = trial)) +
  # identity line (perfect model fit)
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # 95% credible band around the model predictions
  geom_smooth(mapping = aes(y = .data[[x]],
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # bootstrapped CIs of the empirical ratings
  # (aes_string() is deprecated; use the .data pronoun instead)
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.2) +
  geom_point(size = 2) +
  # label the highlighted trials
  geom_text(data = df.plot %>% filter(trial <= 4),
            nudge_x = 4,
            nudge_y = -1,
            size = 6) +
  # annotate correlation and RMSE
  annotate(geom = "text",
           label = df.plot %>% 
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>% 
             mutate(across(.cols = everything(),
                           .fns = ~ round(., 2))) %>% 
             unlist() %>% 
             str_c(names(.), " = ", .),
           x = c(0, 0), 
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  labs(x = "causal attribution model",
       y = "mean importance rating") +
  # guide = FALSE is deprecated; "none" removes the legend
  scale_color_manual(values = c("black", "#377eb8"),
                     guide = "none") +
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24),
        axis.title.x = element_text(size = 22))

# ggsave(file = "../../figures/plots/experiment1_importance_scatter.pdf",
#        width = 5,
#        height = 5)

3.5.4 Tile plot of Bayesian model fit

# load the grid of Bayesian model fits (provides df.bayesian_fit)
load(file = "data/bayesian_model_fits_surprise.RData")

# heatmap of RMSE over the (a, b) parameter grid; the best-fitting
# combination is marked with a red dot
ggplot(data = df.bayesian_fit,
       mapping = aes(x = a,
                     y = b,
                     fill = rmse)) +
  geom_tile(color = "black") +
  # mark the parameter combination with the lowest RMSE
  geom_point(data = df.bayesian_fit %>% 
               filter(rmse == min(rmse)),
             shape = 21,
             fill = "red",
             size = 5) +
  scale_fill_gradient(low = "black",
                      high = "white") +
  scale_x_continuous(breaks = 2:19) +
  scale_y_continuous(breaks = 1:9) +
  theme(text = element_text(size = 20),
        legend.position = c(1, 1),
        legend.justification = c(1, 1))

# ggsave("../../figures/plots/experiment1_tiles.pdf",
#        width = 8, 
#        height = 6)

4 Experiment 2: Responsibility judgments

4.1 Read in data

# read in the raw data for Experiment 2 from the sqlite database
con = dbConnect(SQLite(), dbname = "../../data/experiment_2/experiment_2.db")
df.exp2 = dbReadTable(con, "voting_three_five")
dbDisconnect(con)

# keep only sessions with status 3 or 4
df.exp2 = df.exp2 %>% 
  filter(status %in% c(3, 4))

4.2 Demographics

# parse the demographic information stored as JSON in the datastring column
df.exp2.demographics = df.exp2$datastring %>% 
  as.tbl_json() %>%
  enter_object("questiondata") %>% 
  gather_object() %>% 
  append_values_string() %>% 
  as_tibble() %>% 
  # document.id indexes which participant's JSON string was parsed
  rename(participant = document.id) %>% 
  # one column per questiondata field (values are still strings here)
  spread(name, string) %>% 
  mutate(age = as.numeric(age),
         condition = as.factor(condition)) %>% 
  # add timing info from the database columns; relies on row order of
  # df.exp2 matching the parsed participants
  mutate(begin = df.exp2$beginhit,
         end =  df.exp2$endhit,
         time = as.duration(interval(ymd_hms(df.exp2$beginexp), ymd_hms(df.exp2$endhit)))) %>% 
  select(participant, sex, age, condition, time, feedback)
# summary statistics: age, completion time (in minutes), and sex counts
df.exp2.demographics %>% 
  summarize(age_mean = round(mean(age)),
            age_sd = round(sd(age)),
            time_mean = round(mean(time)/60, 2),
            time_sd = round(sd(time)/60, 2),
            n_female = sum(sex == "female"),
            n_male = sum(sex == "male")) %>% 
  print_table()
age_mean age_sd time_mean time_sd n_female n_male
36 14 5.96 5.17 86 122

4.2.1 Participant feedback

# interactive table with each participant's free-form feedback
datatable(data = df.exp2.demographics %>% 
            select(participant, feedback))

4.3 Responsibility judgments

df.exp2.wide = data.frame(participant = seq_len(nrow(df.exp2)))

# parse each participant's trial data out of the JSON datastring;
# trialdata is a positional list: [[1]] trial id, [[3]] ratings,
# [[5]] party affiliations, [[7]] votes, [[9]] support, [[11]] rule,
# [[13]] outcome -- NOTE(review): positions inferred from the indices
# used here; confirm against the experiment code.
# seq_len()/seq_along() instead of 1:n guard against zero-length input;
# the inner loop over k replaces 15 copy-pasted assignments.
for (i in seq_len(nrow(df.exp2))){
  a = rjson::fromJSON(df.exp2$datastring[i])
  for (j in seq_along(a$data)){
    df.exp2.wide[[str_c("trial_", j - 1)]][i] = 
      a$data[[j]]$trialdata[[1]]
    # one rating / party / vote slot per committee member (up to 5)
    for (k in 1:5){
      df.exp2.wide[[str_c("rating", k, "_", j - 1)]][i] = 
        a$data[[j]]$trialdata[[3]][[k]]
      df.exp2.wide[[str_c("party", k, "_", j - 1)]][i] = 
        a$data[[j]]$trialdata[[5]][k]
      df.exp2.wide[[str_c("vote", k, "_", j - 1)]][i] = 
        a$data[[j]]$trialdata[[7]][k]
    }
    df.exp2.wide[[str_c("support_", j - 1)]][i] = 
      a$data[[j]]$trialdata[[9]]
    df.exp2.wide[[str_c("rule_", j - 1)]][i] = 
      a$data[[j]]$trialdata[[11]]
    df.exp2.wide[[str_c("outcome_", j - 1)]][i] = 
      a$data[[j]]$trialdata[[13]]
  }
}

# make a tidy (long-format) data frame: one row per participant,
# trial, and committee member
df.exp2.long = wideToLong(df.exp2.wide,
                          within = "trialOrder") %>% 
  select(participant, trial, contains("rating"), contains("party"), contains("vote"), support, rule, outcome) %>% 
  # split the numbered rating/party/vote columns into a person index
  gather("index", "value", rating1:vote5) %>% 
  separate(index, sep = -1, into = c("index", "person")) %>% 
  spread(index, value) %>% 
  mutate(trial = str_remove(trial, "trial_"),
         trial = as.numeric(trial)) %>% 
  group_by(participant) %>% 
  # presentation order -- NOTE(review): rep(1:17, each = 5) assumes
  # exactly 17 trials x 5 person rows per participant; verify
  mutate(order = rep(1:17, each = 5),
         rating = ifelse(rating == "NA", NA, rating),
         rating = as.numeric(rating)) %>% 
  ungroup() %>% 
  group_by(participant, trial) %>% 
  # committee size (3 when some party slots are NA, else 5) and
  # number of sliders shown on that trial
  mutate(size = ifelse(any(is.na(party)), 3, 5),
         sliders = ifelse(sum(!is.na(rating)) == 2, 2, 1)) %>%
  ungroup() %>% 
  left_join(df.exp2.demographics %>% 
              select(participant, condition),
            by = "participant") %>% 
  mutate(across(c(person, party, vote), as.numeric)) %>% 
  select(participant, condition, trial, order, support, rule, outcome, size, sliders, person, party, vote, rating) %>% 
  # drop the padding rows for 3-person committees
  filter(!is.na(party))
  
# remove intermediate variables that are no longer needed
rm(list = c("df.exp2.wide", "a"))

4.4 Model predictions

Determine predictions for all the cases:

# one row per unique trial x person combination with the model predictors
df.exp2.predictions = df.exp2.long %>% 
  select(condition, trial, support, rule, outcome, size, sliders, person, party, vote) %>% 
  distinct() %>% 
  arrange(trial, person) %>% 
  # code outcome numerically (1 = passed, 0 = not passed)
  mutate(outcome = ifelse(outcome == "not passed", 0, 1)) %>% 
  na.omit() %>% 
  group_by(trial) %>% 
  # pivotality: 0 when the person voted against the outcome; otherwise
  # graded by the distance of the vote count from the decision rule
  # (the +1 in the passed branch reflects the threshold asymmetry --
  # NOTE(review): confirm against the model description)
  mutate(pivotality = ifelse(vote != outcome, 0, 
                             ifelse(outcome == 0, 
                                    1 / (abs(sum(vote) - rule)),
                                    1 / (abs(sum(vote) - rule) + 1))),
         # number of members who voted the same way as this person
         n_causes = ifelse(vote == 1,
                                 sum(vote),
                                 sum(1 - vote))) %>% 
  ungroup() %>% 
  # 1 when the person voted along party lines
  mutate(norm_disposition = ifelse(party == vote, 1, 0))
  
# add surprise predictions for the focal (rated) person on each trial
df.exp2.predictions = df.exp2.predictions %>% 
  select(trial:vote) %>% 
  group_by(trial) %>% 
  # per-trial counts: members in the same/other party, and how many of
  # each voted yes
  mutate(n_same = sum(party == 1),
         n_other = sum(party == 0),
         n_same_yes = sum(party == 1 & vote == 1),
         n_other_yes = sum(party == 0 & vote == 1)) %>% 
  ungroup() %>% 
  # focus: nonzero for the person(s) who actually received a rating
  # (focus * person keeps the person id for rated rows, 0 otherwise)
  left_join(df.exp2.long %>% 
              mutate(focus = !is.na(rating),
                     focus = focus * person) %>% 
              select(trial, person, focus) %>% 
              distinct(),
            by = c("trial", "person")) %>% 
  filter(focus != 0) %>% 
  # recompute the counts from the focal person's perspective by
  # excluding that person from their own count
  mutate(n_same = ifelse(party == 1,
                         n_same - 1,
                         n_same),
         n_other = ifelse(party == 0,
                         n_other - 1,
                         n_other),
         n_same_yes = ifelse(party == 1 & vote == 1,
                             n_same_yes - 1,
                             n_same_yes),
         n_other_yes = ifelse(party == 0 & vote == 1,
                             n_other_yes - 1,
                             n_other_yes)) %>% 
  group_by(trial) %>% 
  mutate(index = 1:n()) %>% 
  ungroup() %>% 
  # bring back the other predictors (pivotality, n_causes, ...)
  left_join(df.exp2.predictions,
            by = c("trial", 
                   "support", 
                   "rule", 
                   "outcome", 
                   "size", 
                   "sliders", 
                   "person", 
                   "party", 
                   "vote")) %>% 
  # join the predicted vote probabilities -- NOTE(review):
  # df.predictions is defined earlier in the file, not visible here
  left_join(df.predictions %>% 
              select(vote_same, vote_other, n_same:n_other_yes),
            by = c("n_same", "n_other", "n_same_yes", "n_other_yes")) %>% 
  # surprise = predicted probability of the vote NOT occurring, based
  # on the same/other-party vote probabilities
  mutate(surprise = NA,
         surprise = ifelse(party == 1 & vote == 1,
                           1 - vote_same,
                           surprise),
         surprise = ifelse(party == 1 & vote == 0,
                           vote_same,
                           surprise),
         surprise = ifelse(party == 0 & vote == 1,
                           1 - vote_other,
                           surprise),
         surprise = ifelse(party == 0 & vote == 0,
                           vote_other,
                           surprise)) %>% 
  select(trial, index, condition, everything())

4.5 Stats

4.5.1 Bayesian mixed effects models

4.5.1.1 For the selection of 24 cases

4.5.1.1.1 Structure the data
# data for the 24 cases that also appeared in Experiment 1: combines
# the responsibility ratings with model predictions and the empirical
# surprise/importance ratings from Experiment 1
df.exp2.selection = df.exp2.long %>% 
  mutate(outcome = ifelse(outcome == "not passed", 0 , 1)) %>% 
  # map trials/persons to their Experiment 1 counterparts; rows without
  # a counterpart become NA and are dropped by na.omit() below
  left_join(df.exp1.trialinfo %>% 
              select(trial_exp1 = trial, trial = trial_exp2, person),
            by = c("trial", "person")) %>% 
  na.omit() %>% 
  # model predictions 
  left_join(df.exp2.predictions,
            by = c("condition",
                   "trial",
                   "support",
                   "rule",
                   "outcome",
                   "size",
                   "sliders",
                   "person",
                   "party",
                   "vote")) %>% 
  # surprise and importance judgments from experiment 1
  left_join(df.exp1.regression %>% 
              select(trial, question, rating_mean) %>% 
              spread(question, rating_mean) %>% 
              rename(importance_empirical = importance,
                     surprise_empirical = surprise),
            by = c("trial_exp1" = "trial")) %>% 
  na.omit()
4.5.1.1.2 Empirical
# Bayesian mixed effects regression: responsibility ratings predicted
# by the empirical surprise and importance ratings from Experiment 1
# (results are cached to file)
fit_brm_exp2_selection_empirical = brm(
  formula = rating ~ 1 + surprise_empirical + importance_empirical + 
    (1 + surprise_empirical + importance_empirical | participant),
  data = df.exp2.selection,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_selection_empirical")

summary(fit_brm_exp2_selection_empirical)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + surprise_empirical + importance_empirical + (1 + surprise_empirical + importance_empirical | participant) 
   Data: df.exp2.selection (Number of observations: 499) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                                             Estimate Est.Error l-95% CI
sd(Intercept)                                   34.13      4.05    26.31
sd(surprise_empirical)                           0.44      0.12     0.18
sd(importance_empirical)                         0.54      0.10     0.35
cor(Intercept,surprise_empirical)               -0.05      0.23    -0.48
cor(Intercept,importance_empirical)             -0.61      0.12    -0.80
cor(surprise_empirical,importance_empirical)    -0.67      0.16    -0.91
                                             u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Intercept)                                   42.13 1.00      991     2154
sd(surprise_empirical)                           0.65 1.01      268      756
sd(importance_empirical)                         0.73 1.01      571      743
cor(Intercept,surprise_empirical)                0.40 1.01      601      921
cor(Intercept,importance_empirical)             -0.34 1.01      827     1532
cor(surprise_empirical,importance_empirical)    -0.29 1.01      760      667

Population-Level Effects: 
                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
Intercept               29.77      4.05    21.99    37.80 1.00     2603
surprise_empirical       0.14      0.06     0.01     0.26 1.00     3111
importance_empirical     0.43      0.08     0.27     0.57 1.00     2102
                     Tail_ESS
Intercept                3031
surprise_empirical       3018
importance_empirical     2645

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    17.84      1.10    15.81    20.04 1.01      412     1401

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
4.5.1.1.3 Model
# population-level predictions of surprise and importance for the
# selected cases, derived from the Experiment 1 regressions
df.data = fitted(object = fit_brm_exp1_surprise_bayesian,
                 newdata = df.exp2.selection,
                 re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>% 
  select(surprise_model = estimate) %>% 
  bind_cols(
    fitted(object = fit_brm_exp1_importance_bayesian,
           newdata = df.exp2.selection,
           re_formula = NA) %>% 
      as_tibble() %>% 
      clean_names() %>% 
      select(importance_model = estimate),
    df.exp2.selection
  )

# same regression as above but with the model-based predictors
# (results are cached to file)
fit_brm_exp2_selection_model = brm(
  formula = rating ~ 1 + surprise_model + importance_model + 
    (1 + surprise_model + importance_model | participant),
  data = df.data,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_selection_model")

summary(fit_brm_exp2_selection_model)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + surprise_model + importance_model + (1 + surprise_model + importance_model | participant) 
   Data: df.data (Number of observations: 499) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                                     Estimate Est.Error l-95% CI u-95% CI Rhat
sd(Intercept)                           31.77      4.29    23.58    40.29 1.00
sd(surprise_model)                       0.46      0.10     0.25     0.64 1.00
sd(importance_model)                     0.48      0.10     0.26     0.66 1.01
cor(Intercept,surprise_model)           -0.17      0.21    -0.58     0.24 1.00
cor(Intercept,importance_model)         -0.50      0.15    -0.73    -0.16 1.00
cor(surprise_model,importance_model)    -0.69      0.15    -0.90    -0.36 1.00
                                     Bulk_ESS Tail_ESS
sd(Intercept)                            1295     1798
sd(surprise_model)                        606      778
sd(importance_model)                      540      645
cor(Intercept,surprise_model)             815      906
cor(Intercept,importance_model)          1020     1488
cor(surprise_model,importance_model)     1090     1736

Population-Level Effects: 
                 Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept           28.14      4.13    20.17    36.22 1.00     2831     3326
surprise_model       0.20      0.06     0.08     0.32 1.00     3833     3017
importance_model     0.41      0.07     0.27     0.55 1.00     2864     3037

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    18.28      1.10    16.22    20.54 1.00      617     1030

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
4.5.1.1.4 Lesioned models

Surprise only

# lesioned model: empirical surprise as the only predictor
# (results are cached to file)
fit_brm_exp2_selection_empirical_surprise = brm(
  formula = rating ~ 1 + surprise_empirical + 
    (1 + surprise_empirical | participant),
  data = df.exp2.selection,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_selection_empirical_surprise")

summary(fit_brm_exp2_selection_empirical_surprise)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + surprise_empirical + (1 + surprise_empirical | participant) 
   Data: df.exp2.selection (Number of observations: 499) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                                  Estimate Est.Error l-95% CI u-95% CI Rhat
sd(Intercept)                        26.05      3.04    20.26    32.39 1.00
sd(surprise_empirical)                0.24      0.08     0.10     0.39 1.00
cor(Intercept,surprise_empirical)    -0.90      0.09    -1.00    -0.68 1.00
                                  Bulk_ESS Tail_ESS
sd(Intercept)                         1316     2047
sd(surprise_empirical)                1046     1762
cor(Intercept,surprise_empirical)     1175     1718

Population-Level Effects: 
                   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept             44.77      3.19    38.28    50.90 1.00     1897     2349
surprise_empirical     0.35      0.05     0.24     0.46 1.00     2757     2777

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    22.30      0.88    20.61    24.03 1.00     2057     2899

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Importance only

# lesioned model: empirical importance as the only predictor
# (results are cached to file)
fit_brm_exp2_selection_empirical_importance = brm(
  formula = rating ~ 1 + importance_empirical + 
    (1 + importance_empirical | participant),
  data = df.exp2.selection,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_selection_empirical_importance")

summary(fit_brm_exp2_selection_empirical_importance)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + importance_empirical + (1 + importance_empirical | participant) 
   Data: df.exp2.selection (Number of observations: 499) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                                    Estimate Est.Error l-95% CI u-95% CI Rhat
sd(Intercept)                          32.95      4.42    24.20    41.76 1.00
sd(importance_empirical)                0.41      0.09     0.22     0.56 1.00
cor(Intercept,importance_empirical)    -0.80      0.08    -0.90    -0.62 1.00
                                    Bulk_ESS Tail_ESS
sd(Intercept)                            913     1249
sd(importance_empirical)                 451      625
cor(Intercept,importance_empirical)     1689     1454

Population-Level Effects: 
                     Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
Intercept               30.22      4.09    22.32    38.44 1.00     1805
importance_empirical     0.52      0.06     0.40     0.64 1.00     2075
                     Tail_ESS
Intercept                2356
importance_empirical     2804

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    18.87      1.01    17.01    20.94 1.00      707     1491

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

4.5.1.2 Overall

4.5.1.2.1 Structure the data
# individual data points, joined with the model predictors
# (index identifies the rated person within each participant x trial)
df.exp2.regression = df.exp2.long %>% 
  na.omit() %>% 
  group_by(participant, trial) %>% 
  mutate(index = 1:n()) %>% 
  ungroup() %>% 
  # use the numerically-coded outcome from the predictions instead
  select(-outcome) %>% 
  left_join(df.exp2.predictions %>% 
              select(trial, index, pivotality, n_causes, surprise, outcome, norm_disposition),
            by = c("trial", "index"))

# means with bootstrapped confidence intervals; smean.cl.boot() is
# called once per group so that the mean and both CI bounds come from
# the same bootstrap samples (calling it three times, as before,
# redraws the samples for each quantity)
df.exp2.regression.means = df.exp2.regression %>% 
  group_by(trial, index, support, rule, size, sliders, person, party, vote, pivotality, n_causes, 
           surprise, outcome, norm_disposition) %>% 
  summarize(rating_ci = list(smean.cl.boot(rating))) %>% 
  ungroup() %>% 
  # smean.cl.boot() returns a named vector c(Mean, Lower, Upper)
  mutate(rating_mean = map_dbl(rating_ci, "Mean"),
         rating_low = map_dbl(rating_ci, "Lower"),
         rating_high = map_dbl(rating_ci, "Upper")) %>% 
  select(-rating_ci)
4.5.1.2.2 Surprise
# responsibility ratings predicted by surprise alone
# (results are cached to file)
fit_brm_exp2_responsibility_surprise = brm(
  formula = rating ~ 1 + surprise + (1 + surprise | participant),
  data = df.exp2.regression,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_responsibility_surprise")

summary(fit_brm_exp2_responsibility_surprise)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + surprise + (1 + surprise | participant) 
   Data: df.exp2.regression (Number of observations: 5199) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                        Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)              27.94      2.49    23.15    32.99 1.00     1615
sd(surprise)               40.94      5.65    30.13    52.16 1.00      822
cor(Intercept,surprise)    -0.80      0.05    -0.87    -0.69 1.00     2193
                        Tail_ESS
sd(Intercept)               2571
sd(surprise)                1393
cor(Intercept,surprise)     2824

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    41.81      2.57    36.54    46.93 1.00     3388     3129
surprise     36.95      4.53    28.11    45.97 1.00     4041     3375

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    22.66      0.23    22.20    23.12 1.00     6031     3092

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
4.5.1.2.3 Pivotality + n_causes + surprise
# full model: pivotality, number of causes, and surprise
# (results are cached to file)
fit_brm_exp2_responsibility_bayesian = brm(
  formula = rating ~ 1 + pivotality + n_causes + surprise + 
    (1 + pivotality + n_causes + surprise | participant),
  data = df.exp2.regression,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_responsibility_bayesian")
4.5.1.2.4 Pivotality + n_causes
# lesioned model: pivotality and number of causes only
# (results are cached to file)
fit_brm_exp2_responsibility_importance = brm(
  formula = rating ~ 1 + pivotality + n_causes + 
    (1 + pivotality + n_causes | participant),
  data = df.exp2.regression,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_responsibility_importance")

summary(fit_brm_exp2_responsibility_importance)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + pivotality + n_causes + (1 + pivotality + n_causes | participant) 
   Data: df.exp2.regression (Number of observations: 5199) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                26.26      1.97    22.56    30.26 1.00     1939
sd(pivotality)               21.21      1.52    18.32    24.21 1.00     1583
sd(n_causes)                  5.92      0.43     5.15     6.80 1.00      488
cor(Intercept,pivotality)    -0.78      0.04    -0.85    -0.70 1.00     1706
cor(Intercept,n_causes)      -0.52      0.07    -0.64    -0.37 1.00      553
cor(pivotality,n_causes)      0.22      0.09     0.03     0.40 1.00      721
                          Tail_ESS
sd(Intercept)                 2261
sd(pivotality)                2618
sd(n_causes)                  1219
cor(Intercept,pivotality)     2725
cor(Intercept,n_causes)       1550
cor(pivotality,n_causes)      1457

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     70.95      2.28    66.52    75.33 1.00     3181     2921
pivotality    13.55      1.83     9.97    17.06 1.00     3152     2893
n_causes      -5.98      0.49    -6.93    -5.02 1.00     1641     2447

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    19.18      0.20    18.79    19.59 1.00     6669     2890

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
4.5.1.2.5 Pivotality + n_causes + surprise + outcome
# full model with the outcome as an additional predictor
# (results are cached to file)
fit_brm_exp2_responsibility_bayesian_outcome = brm(
  formula = rating ~ 1 + pivotality + n_causes + surprise + outcome + 
    (1 + pivotality + n_causes + surprise + outcome | participant),
  data = df.exp2.regression,
  seed = 1,
  cores = 2,
  file = "cache/fit_brm_exp2_responsibility_bayesian_outcome")

summary(fit_brm_exp2_responsibility_bayesian_outcome)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: rating ~ 1 + pivotality + n_causes + surprise + outcome + (1 + pivotality + n_causes + surprise + outcome | participant) 
   Data: df.exp2.regression (Number of observations: 5199) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 208) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                35.77      2.87    30.28    41.51 1.00     1652
sd(pivotality)               21.67      1.51    18.81    24.84 1.00     1372
sd(n_causes)                  5.88      0.43     5.11     6.75 1.01      718
sd(surprise)                 47.55      4.35    39.10    56.05 1.00      847
sd(outcome)                   7.09      0.82     5.50     8.65 1.00     1409
cor(Intercept,pivotality)    -0.61      0.07    -0.73    -0.47 1.00     1016
cor(Intercept,n_causes)      -0.32      0.09    -0.48    -0.14 1.01      596
cor(pivotality,n_causes)      0.22      0.09     0.03     0.39 1.00      587
cor(Intercept,surprise)      -0.67      0.06    -0.78    -0.53 1.00     1151
cor(pivotality,surprise)      0.08      0.11    -0.13     0.29 1.00     1254
cor(n_causes,surprise)       -0.07      0.11    -0.28     0.14 1.00      632
cor(Intercept,outcome)       -0.25      0.13    -0.50     0.00 1.01     1287
cor(pivotality,outcome)      -0.18      0.12    -0.41     0.06 1.00     1533
cor(n_causes,outcome)        -0.21      0.12    -0.44     0.02 1.00     1992
cor(surprise,outcome)         0.37      0.14     0.10     0.64 1.00     1116
                          Tail_ESS
sd(Intercept)                 2251
sd(pivotality)                2021
sd(n_causes)                  1585
sd(surprise)                  1406
sd(outcome)                   1969
cor(Intercept,pivotality)     1984
cor(Intercept,n_causes)       1625
cor(pivotality,n_causes)       977
cor(Intercept,surprise)       2059
cor(pivotality,surprise)      1854
cor(n_causes,surprise)        1533
cor(Intercept,outcome)        2132
cor(pivotality,outcome)       2898
cor(n_causes,outcome)         2858
cor(surprise,outcome)         1476

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     57.86      3.16    51.66    64.16 1.00     1908     2357
pivotality    13.46      1.81     9.97    16.96 1.00     1771     2432
n_causes      -5.68      0.48    -6.60    -4.76 1.00     1133     1852
surprise      21.54      4.45    12.43    30.20 1.00     1993     2832
outcome        4.49      0.75     2.98     6.01 1.00     2708     2420

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    18.18      0.20    17.79    18.58 1.00     3023     2877

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

4.5.2 Model comparison

4.5.2.1 For the selection of 24 cases

4.5.2.1.1 r and RMSE
4.5.2.1.1.1 Based on empirical ratings
# trial-level means of the responsibility ratings and the empirical
# surprise/importance predictors (for computing r and RMSE on the
# selected cases)
df.data = df.exp2.regression %>%
  # keep only trials with an Experiment 1 counterpart
  left_join(df.exp1.trialinfo %>% 
              select(trial_exp1 = trial, trial = trial_exp2, person),
            by = c("trial", "person")) %>% 
  na.omit() %>% 
  left_join(df.exp1.regression %>% 
              select(trial, question, rating_mean) %>% 
              spread(question, rating_mean) %>% 
              rename(importance_empirical = importance,
                     surprise_empirical = surprise),
            by = c("trial_exp1" = "trial")) %>% 
  group_by(trial) %>% 
  summarize(rating = mean(rating),
            surprise_empirical = mean(surprise_empirical),
            importance_empirical = mean(importance_empirical))

# model fit (correlation and RMSE) based on the empirical predictors
fitted(object = fit_brm_exp2_selection_empirical,
       newdata = df.data,
       re_formula = NA) %>%
  as_tibble() %>% 
  bind_cols(df.data) %>% 
  clean_names() %>% 
  summarize(r_empirical = cor(estimate, rating),
            rmse_empirical = rmse(estimate, rating)) %>% 
  mutate(across(.cols = everything(),
                .fns = ~ round(., 2)))
# A tibble: 1 x 2
  r_empirical rmse_empirical
        <dbl>          <dbl>
1        0.86           6.59
4.5.2.1.1.2 Based on model predictions
# mean ratings for the selected cases
df.tmp = df.exp2.selection %>% 
  distinct(trial, index) %>% 
  left_join(df.exp2.regression.means,
            by = c("trial", "index")) %>% 
  rename(rating = rating_mean)

# attach the population-level surprise and importance predictions
df.data = fitted(object = fit_brm_exp1_surprise_bayesian,
                 newdata = df.tmp,
                 re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>% 
  select(surprise_model = estimate) %>% 
  bind_cols(
    fitted(object = fit_brm_exp1_importance_bayesian,
           newdata = df.tmp,
           re_formula = NA) %>% 
      as_tibble() %>% 
      clean_names() %>% 
      select(importance_model = estimate),
    df.tmp
  )

# Fit of the model-based selection regression: r and RMSE between the
# fixed-effect predictions and the mean ratings, rendered via print_table().
fit_brm_exp2_selection_model %>% 
  fitted(newdata = df.data,
         re_formula = NA) %>%
  as_tibble() %>% 
  bind_cols(df.data) %>% 
  clean_names() %>% 
  summarize(r_model = cor(estimate, rating),
            rmse_model = rmse(estimate, rating)) %>% 
  print_table()
r_model rmse_model
0.85 6.76

4.5.2.2 Overall

4.5.2.2.1 r and RMSE
# The four model-fit summaries below were the same pipeline repeated per
# model; factored into one helper.
# fun_fit_summary: r and RMSE between a brms model's fixed-effect
# predictions (re_formula = NA) on `data` and the mean ratings in
# data$rating, printed as a table.
fun_fit_summary = function(fit, data){
  fit %>% 
    fitted(newdata = data,
           re_formula = NA) %>%
    as_tibble() %>% 
    clean_names() %>% 
    bind_cols(data) %>% 
    summarize(r = cor(estimate, rating),
              rmse = rmse(estimate, rating)) %>% 
    print_table()
}

df.data = df.exp2.regression.means %>% 
  rename(rating = rating_mean)

# FULL MODEL 
fun_fit_summary(fit_brm_exp2_responsibility_bayesian, df.data)

# ONLY SURPRISE MODEL 
fun_fit_summary(fit_brm_exp2_responsibility_surprise, df.data)

# ONLY IMPORTANCE MODEL 
fun_fit_summary(fit_brm_exp2_responsibility_importance, df.data)

# FULL MODEL WITH OUTCOME 
fun_fit_summary(fit_brm_exp2_responsibility_bayesian_outcome, df.data)
r rmse
0.77 8.16
r rmse
0.29 12.36
r rmse
0.76 8.37
r rmse
0.81 7.6
4.5.2.2.2 loo
# Attach LOO and WAIC criteria to each responsibility model (reloo = T
# refits exactly for any problematic observations), then compare models on
# expected log predictive density.
fit_brm_exp2_responsibility_bayesian = fit_brm_exp2_responsibility_bayesian %>% 
  add_criterion(criterion = c("loo", "waic"),
                reloo = T)

fit_brm_exp2_responsibility_importance = fit_brm_exp2_responsibility_importance %>% 
  add_criterion(criterion = c("loo", "waic"),
                reloo = T)

fit_brm_exp2_responsibility_bayesian_outcome = fit_brm_exp2_responsibility_bayesian_outcome %>% 
  add_criterion(criterion = c("loo", "waic"),
                reloo = T)

# full model vs. importance-only model
loo_compare(fit_brm_exp2_responsibility_bayesian,
            fit_brm_exp2_responsibility_importance)

# full model with vs. without the outcome predictor
loo_compare(fit_brm_exp2_responsibility_bayesian,
            fit_brm_exp2_responsibility_bayesian_outcome)
                                       elpd_diff se_diff
fit_brm_exp2_responsibility_bayesian     0.0       0.0  
fit_brm_exp2_responsibility_importance -89.4      16.1  
                                             elpd_diff se_diff
fit_brm_exp2_responsibility_bayesian_outcome   0.0       0.0  
fit_brm_exp2_responsibility_bayesian         -70.7      14.6  

4.6 Tables

4.6.1 Trial information

4.6.1.1 3 committee cases

# Trial table for the 3-person committees: one row per trial with party
# (p1-p3) and vote (v1-v3) indicators, the decision threshold, and the
# outcome (0 = not passed, 1 = passed).
df.exp2.long %>% 
  filter(size == 3) %>% 
  select(trial, rule, outcome, person, party, vote) %>% 
  distinct() %>% 
  # one "party_vote" string per person, then spread persons into columns
  unite("party_vote", c(party, vote)) %>% 
  spread(person, party_vote) %>% 
  # split the combined strings back into separate party/vote columns
  separate(`1`, into = c("p1", "v1")) %>% 
  separate(`2`, into = c("p2", "v2")) %>% 
  separate(`3`, into = c("p3", "v3")) %>% 
  mutate(outcome = factor(outcome,
                          levels = c("not passed", "passed"),
                          labels = 0:1)) %>% 
  select(trial, p1, p2, p3, v1, v2, v3, threshold = rule, outcome) %>% 
  # xtable(digits = 0) %>%
  # print(include.rownames = F,
  #       booktabs = T)
  kable() %>% 
  kable_styling(fixed_thead = T,
                bootstrap_options = "striped") %>% 
  scroll_box(height = "500px")
trial p1 p2 p3 v1 v2 v3 threshold outcome
1 0 0 0 0 0 0 1 0
2 0 0 0 1 0 0 1 1
3 1 0 0 0 0 0 1 0
4 1 0 0 1 0 0 1 1
5 1 0 0 1 1 0 1 1
6 1 1 0 1 0 0 1 1
7 1 1 0 1 1 0 1 1
8 1 1 0 1 1 1 1 1
9 1 1 1 1 1 0 1 1
10 1 1 1 1 1 1 1 1
11 0 0 0 0 0 0 2 0
12 0 0 0 1 0 0 2 0
13 1 0 0 0 0 0 2 0
14 1 0 0 1 0 0 2 0
15 1 0 0 1 1 0 2 1
16 1 1 0 1 0 0 2 0
17 1 1 0 1 1 0 2 1
18 1 1 0 1 1 1 2 1
19 1 1 1 1 1 0 2 1
20 1 1 1 1 1 1 2 1
21 0 0 0 0 0 0 3 0
22 0 0 0 1 0 0 3 0
23 1 0 0 0 0 0 3 0
24 1 0 0 1 0 0 3 0
25 1 0 0 1 1 0 3 0
26 1 1 0 1 0 0 3 0
27 1 1 0 1 1 0 3 0
28 1 1 0 1 1 1 3 1
29 1 1 1 1 1 0 3 0
30 1 1 1 1 1 1 3 1

4.6.1.2 5 committee cases

# Trial table for the 5-person committees: party (p1-p5) and vote (v1-v5)
# indicators per trial, decision threshold, and outcome (0/1); same
# construction as the 3-committee table above.
df.exp2.long %>% 
  filter(size == 5) %>% 
  select(trial, rule, outcome, person, party, vote) %>% 
  distinct() %>% 
  unite("party_vote", c(party, vote)) %>% 
  spread(person, party_vote) %>% 
  # split each person's combined party_vote string into two columns
  separate(`1`, into = c("p1", "v1")) %>% 
  separate(`2`, into = c("p2", "v2")) %>% 
  separate(`3`, into = c("p3", "v3")) %>% 
  separate(`4`, into = c("p4", "v4")) %>% 
  separate(`5`, into = c("p5", "v5")) %>% 
  mutate(outcome = factor(outcome,
                          levels = c("not passed", "passed"),
                          labels = 0:1)) %>% 
  select(trial, p1, p2, p3, p4, p5, v1, v2, v3, v4, v5, threshold = rule, outcome) %>% 
  # xtable(digits = 0) %>%
  # print(include.rownames = F,
  #       booktabs = T)
  kable() %>% 
  kable_styling(fixed_thead = T,
                bootstrap_options = "striped") %>% 
  scroll_box(height = "500px")
trial p1 p2 p3 p4 p5 v1 v2 v3 v4 v5 threshold outcome
31 0 0 0 0 0 0 0 0 0 0 1 0
32 0 0 0 0 0 1 0 0 0 0 1 1
33 0 0 0 0 0 1 1 0 0 0 1 1
34 1 0 0 0 0 0 0 0 0 0 1 0
35 1 0 0 0 0 1 0 0 0 0 1 1
36 1 0 0 0 0 0 1 0 0 0 1 1
37 1 0 0 0 0 1 1 0 0 0 1 1
38 1 0 0 0 0 1 1 1 0 0 1 1
39 1 1 0 0 0 0 0 0 0 0 1 0
40 1 1 0 0 0 1 0 0 0 0 1 1
41 1 1 0 0 0 1 1 0 0 0 1 1
42 1 1 0 0 0 1 0 1 0 0 1 1
43 1 1 0 0 0 1 1 1 0 0 1 1
44 1 1 0 0 0 1 1 1 1 0 1 1
45 1 1 1 0 0 1 0 0 0 0 1 1
46 1 1 1 0 0 1 1 0 0 0 1 1
47 1 1 1 0 0 1 1 1 0 0 1 1
48 1 1 1 0 0 1 1 0 1 0 1 1
49 1 1 1 0 0 1 1 1 1 0 1 1
50 1 1 1 0 0 1 1 1 1 1 1 1
51 1 1 1 1 0 1 1 0 0 0 1 1
52 1 1 1 1 0 1 1 1 0 0 1 1
53 1 1 1 1 0 1 1 1 1 0 1 1
54 1 1 1 1 0 1 1 1 0 1 1 1
55 1 1 1 1 0 1 1 1 1 1 1 1
56 1 1 1 1 1 1 1 1 0 0 1 1
57 1 1 1 1 1 1 1 1 1 0 1 1
58 1 1 1 1 1 1 1 1 1 1 1 1
59 0 0 0 0 0 0 0 0 0 0 2 0
60 0 0 0 0 0 1 0 0 0 0 2 0
61 0 0 0 0 0 1 1 0 0 0 2 1
62 1 0 0 0 0 0 0 0 0 0 2 0
63 1 0 0 0 0 1 0 0 0 0 2 0
64 1 0 0 0 0 0 1 0 0 0 2 0
65 1 0 0 0 0 1 1 0 0 0 2 1
66 1 0 0 0 0 1 1 1 0 0 2 1
67 1 1 0 0 0 0 0 0 0 0 2 0
68 1 1 0 0 0 1 0 0 0 0 2 0
69 1 1 0 0 0 1 1 0 0 0 2 1
70 1 1 0 0 0 1 0 1 0 0 2 1
71 1 1 0 0 0 1 1 1 0 0 2 1
72 1 1 0 0 0 1 1 1 1 0 2 1
73 1 1 1 0 0 1 0 0 0 0 2 0
74 1 1 1 0 0 1 1 0 0 0 2 1
75 1 1 1 0 0 1 1 1 0 0 2 1
76 1 1 1 0 0 1 1 0 1 0 2 1
77 1 1 1 0 0 1 1 1 1 0 2 1
78 1 1 1 0 0 1 1 1 1 1 2 1
79 1 1 1 1 0 1 1 0 0 0 2 1
80 1 1 1 1 0 1 1 1 0 0 2 1
81 1 1 1 1 0 1 1 1 1 0 2 1
82 1 1 1 1 0 1 1 1 0 1 2 1
83 1 1 1 1 0 1 1 1 1 1 2 1
84 1 1 1 1 1 1 1 1 0 0 2 1
85 1 1 1 1 1 1 1 1 1 0 2 1
86 1 1 1 1 1 1 1 1 1 1 2 1
87 0 0 0 0 0 0 0 0 0 0 3 0
88 0 0 0 0 0 1 0 0 0 0 3 0
89 0 0 0 0 0 1 1 0 0 0 3 0
90 1 0 0 0 0 0 0 0 0 0 3 0
91 1 0 0 0 0 1 0 0 0 0 3 0
92 1 0 0 0 0 0 1 0 0 0 3 0
93 1 0 0 0 0 1 1 0 0 0 3 0
94 1 0 0 0 0 1 1 1 0 0 3 1
95 1 1 0 0 0 0 0 0 0 0 3 0
96 1 1 0 0 0 1 0 0 0 0 3 0
97 1 1 0 0 0 1 1 0 0 0 3 0
98 1 1 0 0 0 1 0 1 0 0 3 0
99 1 1 0 0 0 1 1 1 0 0 3 1
100 1 1 0 0 0 1 1 1 1 0 3 1
101 1 1 1 0 0 1 0 0 0 0 3 0
102 1 1 1 0 0 1 1 0 0 0 3 0
103 1 1 1 0 0 1 1 1 0 0 3 1
104 1 1 1 0 0 1 1 0 1 0 3 1
105 1 1 1 0 0 1 1 1 1 0 3 1
106 1 1 1 0 0 1 1 1 1 1 3 1
107 1 1 1 1 0 1 1 0 0 0 3 0
108 1 1 1 1 0 1 1 1 0 0 3 1
109 1 1 1 1 0 1 1 1 1 0 3 1
110 1 1 1 1 0 1 1 1 0 1 3 1
111 1 1 1 1 0 1 1 1 1 1 3 1
112 1 1 1 1 1 1 1 1 0 0 3 1
113 1 1 1 1 1 1 1 1 1 0 3 1
114 1 1 1 1 1 1 1 1 1 1 3 1
115 0 0 0 0 0 0 0 0 0 0 4 0
116 0 0 0 0 0 1 0 0 0 0 4 0
117 0 0 0 0 0 1 1 0 0 0 4 0
118 1 0 0 0 0 0 0 0 0 0 4 0
119 1 0 0 0 0 1 0 0 0 0 4 0
120 1 0 0 0 0 0 1 0 0 0 4 0
121 1 0 0 0 0 1 1 0 0 0 4 0
122 1 0 0 0 0 1 1 1 0 0 4 0
123 1 1 0 0 0 0 0 0 0 0 4 0
124 1 1 0 0 0 1 0 0 0 0 4 0
125 1 1 0 0 0 1 1 0 0 0 4 0
126 1 1 0 0 0 1 0 1 0 0 4 0
127 1 1 0 0 0 1 1 1 0 0 4 0
128 1 1 0 0 0 1 1 1 1 0 4 1
129 1 1 1 0 0 1 0 0 0 0 4 0
130 1 1 1 0 0 1 1 0 0 0 4 0
131 1 1 1 0 0 1 1 1 0 0 4 0
132 1 1 1 0 0 1 1 0 1 0 4 0
133 1 1 1 0 0 1 1 1 1 0 4 1
134 1 1 1 0 0 1 1 1 1 1 4 1
135 1 1 1 1 0 1 1 0 0 0 4 0
136 1 1 1 1 0 1 1 1 0 0 4 0
137 1 1 1 1 0 1 1 1 1 0 4 1
138 1 1 1 1 0 1 1 1 0 1 4 1
139 1 1 1 1 0 1 1 1 1 1 4 1
140 1 1 1 1 1 1 1 1 0 0 4 0
141 1 1 1 1 1 1 1 1 1 0 4 1
142 1 1 1 1 1 1 1 1 1 1 4 1
143 0 0 0 0 0 0 0 0 0 0 5 0
144 0 0 0 0 0 1 0 0 0 0 5 0
145 0 0 0 0 0 1 1 0 0 0 5 0
146 1 0 0 0 0 0 0 0 0 0 5 0
147 1 0 0 0 0 1 0 0 0 0 5 0
148 1 0 0 0 0 0 1 0 0 0 5 0
149 1 0 0 0 0 1 1 0 0 0 5 0
150 1 0 0 0 0 1 1 1 0 0 5 0
151 1 1 0 0 0 0 0 0 0 0 5 0
152 1 1 0 0 0 1 0 0 0 0 5 0
153 1 1 0 0 0 1 1 0 0 0 5 0
154 1 1 0 0 0 1 0 1 0 0 5 0
155 1 1 0 0 0 1 1 1 0 0 5 0
156 1 1 0 0 0 1 1 1 1 0 5 0
157 1 1 1 0 0 1 0 0 0 0 5 0
158 1 1 1 0 0 1 1 0 0 0 5 0
159 1 1 1 0 0 1 1 1 0 0 5 0
160 1 1 1 0 0 1 1 0 1 0 5 0
161 1 1 1 0 0 1 1 1 1 0 5 0
162 1 1 1 0 0 1 1 1 1 1 5 1
163 1 1 1 1 0 1 1 0 0 0 5 0
164 1 1 1 1 0 1 1 1 0 0 5 0
165 1 1 1 1 0 1 1 1 1 0 5 0
166 1 1 1 1 0 1 1 1 0 1 5 0
167 1 1 1 1 0 1 1 1 1 1 5 1
168 1 1 1 1 1 1 1 1 0 0 5 0
169 1 1 1 1 1 1 1 1 1 0 5 0
170 1 1 1 1 1 1 1 1 1 1 5 1

4.6.2 Bayesian mixed effects model

4.6.2.1 Overall

# Fixed-effects summary table for the full responsibility model, with
# highest-posterior-density intervals and readable CI column names.
fit_brm_exp2_responsibility_bayesian %>% 
  tidy(effects = "fixed",
       conf.method = "HPDinterval",
       fix.intercept = F) %>% 
  select(-effect, -component) %>% 
  rename(`lower 95% CI` = conf.low,
         `upper 95% CI` = conf.high) %>% 
  print_table()
term estimate std.error lower 95% CI upper 95% CI
Intercept 59.94 3.25 53.73 66.20
pivotality 13.52 1.82 9.98 17.08
n_causes -5.72 0.50 -6.77 -4.82
surprise 21.68 4.57 12.67 30.69

4.7 Plots

4.7.1 Scatterplot

4.7.1.1 Overall

# Scatterplot of model predictions (x) vs. mean responsibility ratings (y),
# using fixed-effect predictions from the full Bayesian model.
df.plot = fit_brm_exp2_responsibility_bayesian %>% 
  fitted(newdata = df.exp2.regression.means,
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp2.regression.means) %>%
  arrange(desc(trial)) %>% 
  rename(rating = rating_mean)

x = "estimate"
y = "rating"

ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]])) +
  # identity line: perfect prediction
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # 95% credible band around the model predictions
  geom_smooth(mapping = aes(y = .data[[x]],
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # bootstrapped CIs of the mean ratings
  # (aes_string() is deprecated; use the .data pronoun with computed names)
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.1) +
  geom_point(size = 2,
             alpha = 0.2) +
  # print r and RMSE in the top-left corner
  annotate(geom = "text",
           label = df.plot %>% 
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>% 
             # across() without .cols is deprecated; be explicit
             mutate(across(.cols = everything(),
                           .fns = ~ round(., 2))) %>% 
             unlist() %>% 
             str_c(names(.), " = ", .),
           x = c(0, 0), 
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  labs(x = "model prediction",
       y = "mean responsibility rating") +
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24))

# ggsave(file = "../../figures/plots/experiment2_overall_scatter.pdf",
#        width = 5,
#        height = 5)

#### Selection of cases

# For the 24 overlapping cases: exp2 means joined with exp1 trial info and
# the empirical surprise/importance means.
df.data = df.exp2.regression.means %>%
  left_join(df.exp1.trialinfo %>% 
              select(trial_exp1 = trial, trial = trial_exp2, person),
            by = c("trial", "person")) %>% 
  na.omit() %>% 
  left_join(df.exp1.regression %>% 
              select(trial, question, rating_mean) %>% 
              spread(question, rating_mean) %>% 
              rename(importance_empirical = importance,
                     surprise_empirical = surprise),
            by = c("trial_exp1" = "trial")) %>% 
  select(trial, trial_exp1, person, rating = rating_mean, rating_low, rating_high, contains("empirical"))

# fixed-effect predictions of the selection model for those cases
df.plot = fit_brm_exp2_selection_empirical %>% 
  fitted(newdata = df.data,
         re_formula = NA) %>%
  as_tibble() %>% 
  bind_cols(df.data) %>% 
  clean_names()

x = "estimate"
y = "rating"

# Scatterplot of model predictions vs. mean ratings for the selected cases.
ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]])) +
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  geom_smooth(mapping = aes(y = .data[[x]],
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # aes_string() is deprecated; use the .data pronoun with computed names
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.2) +
  geom_point(size = 2,
             alpha = 1) +
  annotate(geom = "text",
           label = df.plot %>% 
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>% 
             # across() without .cols is deprecated; be explicit
             mutate(across(.cols = everything(),
                           .fns = ~ round(., 2))) %>% 
             unlist() %>% 
             str_c(names(.), " = ", .),
           x = c(0, 0), 
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  labs(x = "model prediction",
       y = "mean responsibility rating") +
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24))

# ggsave(file = "../../figures/plots/experiment2_selection_scatter.pdf",
#        width = 5,
#        height = 5)

4.7.2 Means for selection of cases

# \u21e6 = left arrow
# \u21e8 = right arrow
# \u2713 = check mark 
# \u2717 = cross mark 
# \n = line break

# USING THE BAYESIAN MIXED MODEL RESULTS 

# For the 24 overlapping cases: exp2 means joined with exp1 trial info and
# empirical judgments, plus fitted values from the selection model and a
# Unicode row label per case for plotting.
df.data = df.exp2.regression.means %>%
  left_join(df.exp1.trialinfo %>% 
              select(trial_exp1 = trial, trial = trial_exp2, person),
            by = c("trial", "person")) %>% 
  # keep only trials that also appeared in experiment 1
  na.omit() %>% 
  left_join(df.exp1.regression %>% 
              select(trial, question, rating_mean) %>% 
              spread(question, rating_mean) %>% 
              rename(importance_empirical = importance,
                     surprise_empirical = surprise),
            by = c("trial_exp1" = "trial")) %>% 
  select(trial, trial_exp1, person, rating = rating_mean, rating_low, rating_high, contains("empirical"))

df.plot = fit_brm_exp2_selection_empirical %>% 
  fitted(newdata = df.data,
         re_formula = NA) %>%
  as_tibble() %>% 
  bind_cols(df.data) %>% 
  clean_names() %>% 
  # model predictions 
  left_join(df.exp2.predictions,
            by = c("trial", "person")) %>% 
  arrange(desc(rating)) %>% 
  mutate(index = 1:n()) %>% 
  # build one label string per row, e.g. "1. T=2 <arrow>S:<check> ..."
  # (S/O = same/other party, check/cross = yes/no vote); rowwise() is
  # needed because rep() inside str_c() works on scalars per row
  rowwise() %>%
  mutate(n_same_no = n_same - n_same_yes,
         n_other_no = n_other - n_other_yes,
    label = str_c(index, ". ", "T=", rule, " \u21e8",
                       ifelse(party == 1, "S:", "O:"),
                       ifelse(vote == 1, "\u2713 ", "\u2717 "),
                  ifelse(n_same_yes != 0, str_c(rep("S:\u2713 ", n_same_yes), collapse = ""), ""),
                  ifelse(n_same_no != 0, str_c(rep("S:\u2717 ", n_same_no), collapse = ""), ""),
                  ifelse(n_other_yes != 0, str_c(rep("O:\u2713 ", n_other_yes), collapse = ""), ""),
                  ifelse(n_other_no != 0, str_c(rep("O:\u2717 ", n_other_no), collapse = ""), ""))) %>% 
  ungroup() %>% 
  rename(prediction = estimate,
         rating_mean = rating) %>% 
  select(trial_exp2 = trial, trial = trial_exp1, everything())

# Dot plot for the 24 selected cases: mean responsibility (black), model
# prediction (gray), and empirical importance/surprise (colored), ordered
# by mean responsibility rating.
ggplot(data = df.plot,
       mapping = aes(x = reorder(label, rating_mean),
                     y = rating_mean)) +
  # confidence intervals 
  geom_linerange(aes(ymin = rating_low,
                     ymax = rating_high),
                 size = 1) +
  geom_point(aes(color = "responsibility"),
             size = 4) +
  geom_point(aes(y = prediction,
                 color = "model prediction"),
             size = 4) + 
  geom_point(aes(y = importance_empirical,
                 color = "importance"),
             size = 3
             ) + 
  geom_point(aes(y = surprise_empirical,
                 color = "surprise"),
             size = 3) + 
  # fixed mapping from judgment type to color
  scale_color_manual(breaks = c("surprise","importance","model prediction", "responsibility"),
                     values = c(surprise = "#e41a1c", 
                                importance = "#377eb8", 
                                `model prediction` = "gray80", 
                                responsibility = "black")) + 
  scale_y_continuous(breaks = seq(0, 100, 25),
                     labels = seq(0, 100, 25),
                     expand = c(0, 0)) + 
  labs(y = "judgments",
       color = "",
       caption = "T = threshold, S = same party, O = other party, \u21e8 = focus, \u2713 = yes, \u2717 = no") + 
  coord_flip(ylim = c(0, 100)) +
  # light separators between the 24 rows
  geom_vline(xintercept = seq(0.5, 24.5, 1),
             color = "gray90") + 
  theme(axis.title.y = element_blank(),
        plot.margin = margin(r = 1,
                             b = 0.2,
                             l = 0.2,
                             unit = "cm"),
        text = element_text(size = 20),
        legend.position = "top",
        # the Unicode glyphs need a font that contains them
        axis.text.y = element_text(family = "Arial Unicode MS",
                                   hjust = 0),
        plot.caption = element_text(family = "Arial Unicode MS",
                                    hjust = 1,
                                    margin = margin(t = 0.5, unit = "cm"),
                                    size = 16)) +
  guides(color = guide_legend(ncol = 2))

# ggsave(file = "../../figures/plots/experiment2_selection.pdf",
#        width = 8,
#        height = 12,
#        device = cairo_pdf)

5 Experiment 3: Moral judgments

5.1 Read in and structure data

# rating data
# rating data
df.exp3 = read.csv("../../data/experiment_3/experiment_3.csv", fileEncoding = "UTF-8-BOM") %>% 
  clean_names()

# trial information 
# Exp 3 uses the exp-1 trials whose original_experiment matches "Antonia";
# trial_exp3 is a hand-coded mapping to their order in experiment 3.
# NOTE(review): the c(2, 1, 3, 4, 5) assignment depends on the row order
# that filter() produces — verify if df.exp1.trialinfo ever changes.
df.exp3.trialinfo = df.exp1.trialinfo %>% 
  filter(str_detect(original_experiment, "Antonia")) %>% 
  select(-c(trial_exp2, person)) %>% 
  mutate(trial_exp3 = c(2, 1, 3, 4, 5))

# model predictions 
# model predictions 
# Derive surprise predictions for the exp-3 trials from the exp-1
# predictions, restricted to the focal person.
df.exp3.predictions = df.exp1.predictions %>% 
  filter(trial %in% df.exp3.trialinfo$trial) %>% 
  left_join(df.exp3.trialinfo %>% 
              select(trial, trial_exp3),
            by = "trial") %>% 
  # keep only the row for the focal person
  filter(focus == person) %>% 
  # repurpose columns: `sum` presumably counts yes-votes including the
  # focal person, and `party` is overwritten below — TODO confirm against
  # how df.exp1.predictions is built
  rename(n_other_yes = sum,
         n_other = party) %>% 
  mutate(n_other = 4,                    # 4 committee members besides the focus
         n_other_yes = n_other_yes - 1)  %>%  # exclude the focal person's vote
  left_join(df.predictions %>% 
              select(vote_other,
                     n_other_yes,
                     n_other),
            by = c("n_other_yes",
                   "n_other")) %>% 
  # surprise = probability that the others' votes came out this way NOT
  # happening, i.e. 1 - predicted vote probability
  mutate(surprise = 1 - vote_other)

# combine data frames 
# combine data frames 
# Long format: one row per participant x trial with the rating; trial ids
# come from the x1..x5 column names.
# NOTE(review): gather() is superseded; pivot_longer() is the modern
# equivalent (row order of the result would differ).
df.exp3.long = df.exp3 %>% 
  select(participant, condition, x1:x5) %>% 
  gather("trial", "rating", -c(participant, condition)) %>% 
  mutate(trial = str_remove(trial,"x"),
         trial = as.numeric(trial)) %>% 
  # condition is coded 0/1/3 in the raw data
  mutate(condition = factor(condition,
                            levels = c(0, 1, 3),
                            labels = c("neutral",
                                       "moral",
                                       "wrongness")))

# regression data frame 
# regression data frame 
# Per-trial/condition means with bootstrapped 95% CIs; smean.cl.boot()
# returns c(mean, lower, upper).
# NOTE(review): smean.cl.boot() is called twice per cell, so the lower and
# upper limits come from different bootstrap resamples — consider calling
# it once and indexing the result.
df.exp3.regression = df.exp3.long %>% 
  group_by(trial, condition) %>% 
  summarize(rating_mean = mean(rating),
            rating_low = smean.cl.boot(rating)[2],
            rating_high = smean.cl.boot(rating)[3]) %>%
  ungroup() %>% 
  left_join(df.exp3.predictions %>% 
              select(trial = trial_exp3, outcome, pivotality:norm_disposition),
            by = "trial") %>% 
  arrange(condition, trial)

5.2 Demographic data

# Experiment-3 demographics; numeric gender codes are mapped to labels
# (1 = male, 2 = female, 3 = unspecified), anything else becomes NA.
df.exp3.demographics = mutate(
  select(df.exp3, participant, duration, gender, age, strategy),
  gender = case_when(gender == 3 ~ "unspecified",
                     gender == 2 ~ "female",
                     gender == 1 ~ "male"))

# Sample summary: age, gender counts, and completion time (duration is in
# seconds; reported in minutes).
# NOTE(review): sum(gender == "...") propagates NA if any gender is
# missing — fine only if every participant answered.
df.exp3.demographics %>% 
  summarize(age_mean = round(mean(age)),
            age_sd = round(sd(age)),
            n_total = n(),
            n_female = sum(gender == "female"),
            n_male = sum(gender == "male"),
            n_unspecified = sum(gender == "unspecified"),
            time_mean = round(mean(duration)/60, 2),
            time_sd = round(sd(duration)/60, 2)) %>% 
  print_table()
age_mean age_sd n_total n_female n_male n_unspecified time_mean time_sd
31 9 236 159 74 3 5.63 2.52

5.2.1 Participant feedback

# Interactive table of participants' free-text strategy reports.
datatable(select(df.exp3.demographics, participant, strategy))

5.3 Stats

5.3.1 Means

5.3.1.1 Ratings by condition

# Mean and SD of ratings within each condition, as an html table.
print_table(
  summarize(group_by(df.exp3.long, condition),
            mean = mean(rating),
            sd = sd(rating)))
condition mean sd
neutral 66.11 31.69
moral 78.42 28.53
wrongness 88.60 20.37

5.3.2 Bayesian regression

5.3.2.1 Effect of condition on judgments

# Attach trial-level model predictors to the long ratings (exp-3 trial ids
# match trial_exp3 in the predictions).
df.data = left_join(df.exp3.long,
                    df.exp3.predictions,
                    by = c("trial" = "trial_exp3"))

# model with condition 
# model with condition 
# Ratings regressed on the importance predictors (pivotality, n_causes),
# their interaction with condition, and a participant random intercept.
# Fits are cached via `file`; delete the cache file to refit.
fit_brm_exp3_importance_overall = brm(
  formula = rating ~ (pivotality + n_causes) * condition + (1 | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp3_importance_overall"
)

# model without condition
fit_brm_exp3_importance_no_condition = brm(
  formula = rating ~ pivotality + n_causes + (1 | participant),
  data = df.data,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp3_importance_no_condition"
)

# adding loo criterion 
# adding loo criterion 
# Pipe style; reloo = T refits exactly for any problematic observations.
fit_brm_exp3_importance_overall = fit_brm_exp3_importance_overall %>% 
  add_criterion(criterion = "loo",
                reloo = T)

fit_brm_exp3_importance_no_condition = fit_brm_exp3_importance_no_condition %>% 
  add_criterion(criterion = "loo",
                reloo = T)

# model comparison 
loo_compare(fit_brm_exp3_importance_overall,
            fit_brm_exp3_importance_no_condition)
                                     elpd_diff se_diff
fit_brm_exp3_importance_overall         0.0       0.0 
fit_brm_exp3_importance_no_condition -108.9      16.0 

5.3.2.2 Estimates for importance predictors separated by condition

# Fit the importance model for one condition's data.
# data:      ratings + predictors for a single condition (as produced by
#            group_by(condition) %>% nest(), which removes the condition
#            column from the nested data)
# condition: condition label, used for the cache file name
#
# BUG FIX: the original `filter(condition == condition)` was a data-masking
# tautology — both sides resolved to the same object, so the call never
# filtered by the argument. Because the nested data carries no `condition`
# column it happened to be a harmless no-op; the guarded filter below does
# the intended thing if a condition column is ever present.
func_brm_exp3 = function(data, condition){
  if ("condition" %in% names(data)){
    data = data %>% 
      filter(.data$condition == .env$condition)
  }
  brm(formula = rating ~ pivotality + n_causes + (1 | participant),
      data = data,
      cores = 2,
      seed = 1,
      file = str_c("cache/fit_brm_exp3_", condition))
}

# Nest the data by condition (alphabetical order after arrange) and fit
# one brms model per condition via func_brm_exp3().
df.data = df.exp3.long %>% 
  left_join(df.exp3.predictions,
            by = c("trial" = "trial_exp3")) %>% 
  arrange(participant) %>% 
  group_by(condition) %>% 
  nest() %>% 
  arrange(condition) %>% 
  # pass the function directly instead of wrapping it in a lambda
  mutate(brm = map2(data, condition, func_brm_exp3))

# Fixed-effects tables (HPD intervals) for the three per-condition fits.
tmp = lapply(1:3, function(i){
  df.data$brm[[i]] %>% 
    tidy(effects = "fixed",
         conf.level = 0.95,
         conf.method = "HPDinterval",
         fix.intercept = F) %>% 
    select(-c(effect, component))
})

print_table(tmp[[1]])
print_table(tmp[[2]])
print_table(tmp[[3]])
term estimate std.error conf.low conf.high
Intercept 55.14 8.65 38.75 72.58
pivotality 38.40 7.86 22.76 52.98
n_causes -2.77 1.52 -5.75 0.21
term estimate std.error conf.low conf.high
Intercept 86.35 8.45 69.95 102.80
pivotality 9.05 7.41 -6.16 23.00
n_causes -3.71 1.45 -6.64 -0.91
term estimate std.error conf.low conf.high
Intercept 89.78 3.31 83.33 96.16
pivotality -0.10 2.16 -4.46 3.94
n_causes 0.17 0.42 -0.64 1.00

5.4 Plots

5.4.1 Bars with mean judgments

# \u21e6 = left arrow
# \u21e8 = right arrow
# \u2713 = check mark 
# \u2717 = cross mark 
# \n = line break

# Axis labels for the five trials: threshold (T), focus arrow, and the
# five votes (check = yes, cross = no).
x_labels = c(
  "1. T=1 \u21e8 \u2713 \u2717 \u2717 \u2717 \u2717",
  "2. T=1 \u21e8 \u2713 \u2713 \u2717 \u2717 \u2717",
  "3. T=1 \u21e8 \u2713 \u2713 \u2713 \u2713 \u2717",
  "4. T=2 \u21e8 \u2713 \u2713 \u2713 \u2717 \u2717",
  "5. T=2 \u21e8 \u2713 \u2713 \u2713 \u2713 \u2713")

# attach labels; factor levels follow the trial order 1-5, so x_labels
# must stay in that order
df.plot = df.exp3.regression %>% 
  mutate(labels = factor(trial,
                         labels = x_labels))

# Bar plot of mean ratings per trial, faceted by condition, with
# bootstrapped 95% CIs as line ranges and Unicode trial labels.
ggplot(data = df.plot, 
       mapping = aes(x = reorder(labels,
                                 desc(labels)),
                     y = rating_mean)) + 
  # horizontal grid lines drawn behind the bars
  geom_hline(yintercept = seq(25, 100, 25),
             linetype = 1,
             color = "gray80") + 
  geom_bar(stat = "identity",
           color = "black",
           fill = "#377eb8") + 
  # bootstrapped confidence intervals
  geom_linerange(mapping = aes(ymin = rating_low,
                               ymax = rating_high),
                 size = 1) +
  # condition label printed inside each facet panel
  geom_text(data = tibble(label = factor(c("neutral condition",
                                    "moral condition",
                                    "wrongness condition")),
                          condition = factor(c("neutral", "moral", "wrongness")),
                          x = Inf,
                          y = 0),
            mapping = aes(label = label,
                          x = x,
                          y = y),
            vjust = -1,
            hjust = 0,
            fontface = "bold",
            size = 4.5) +
  # geom_text(aes(label = "test", x = Inf, y = Inf), hjust = -1) + 
  # baseline under the bars
  geom_hline(yintercept = -0.5,
             size = 1) +
  # panel strips show the question type, not the condition name
  facet_grid(cols = vars(condition),
             scales = "free",
             switch = "both",
             labeller = as_labeller(c(neutral = "responsibility",
                                      moral = "responsibility",
                                      wrongness = "wrongness"))) + 
  labs(y = "mean rating",
       caption = "T = threshold, \u21e8 = focus, \u2713 = yes, \u2717 = no") +
  scale_y_continuous(limits = c(-0.5, 100),
                     expand = c(0, 0)) + 
  coord_flip(clip = "off") +
  theme(text = element_text(size = 20),
        # the Unicode glyphs need a font that contains them
        axis.text.y = element_text(family = "Arial Unicode MS",
                                   hjust = 0),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        axis.ticks.y = element_blank(),
        strip.background = element_blank(),
        strip.text = element_text(size = 16),
        panel.spacing.x = unit(1, "cm"),
        axis.line.y = element_blank(),
        plot.margin = margin(t = 0.8, r = 0.8, b = 0.1, l = 0.2, unit = "cm"),
        plot.caption = element_text(family = "Arial Unicode MS",
                                    # color = "gray20",
                                    hjust = 1,
                                    margin = margin(t = 0.5, unit = "cm")),
        strip.placement = "outside") 

# ggsave(file = "../../figures/plots/experiment3_bars.pdf",
#        width = 8,
#        height = 5,
#        device = cairo_pdf)

6 Experiment 4: Importance, surprise, responsibility, wrongfulness

6.1 Read in data

set.seed(1)

# Raw export: drop the first two rows (presumably Qualtrics header/metadata
# rows — TODO confirm) and rows without a Prolific id, then assign
# sequential participant ids.
df.exp4 = read_csv("../../data/experiment_4/experiment_4.csv") %>% 
  clean_names() %>% 
  filter(row_number() > 2,
         !is.na(prolific_id)) %>% 
  mutate(participant = 1:n()) %>% 
  relocate(participant)

# trial information 
# Trial/scenario information: `badness` renamed to `bad`, and trial 10's
# incorrectly coded vote count replaced with 2 (single pipeline instead of
# two assignments).
df.exp4.trialinfo = read_csv("data/experiment4_scenarios.csv") %>% 
  rename(bad = badness) %>% 
  mutate(votes = ifelse(trial == 10, 2, votes))

# main data 
# main data 
# Ratings arrive as columns named x<trial>_<question>_1; reshape to one row
# per participant x trial x question.
df.exp4.long = df.exp4 %>% 
  select(participant, x1_b_1:x9_w_1) %>% 
  # drop the trailing "_1" suffix from the column names
  rename_with(.fn = ~ str_remove(., "_1")) %>% 
  pivot_longer(cols = -participant,
               names_to = c("trial", "question"),
               names_sep = "_",
               values_to = "rating") %>% 
  rename(trial_label = trial) %>% 
  # question codes: b/i/s/r/w as labeled below
  mutate(trial_label = as.numeric(str_remove(trial_label, "x")),
         question = factor(question,
                           levels = c("b", "i", "s", "r", "w"),
                           labels = c("badness",
                                      "importance",
                                      "surprise",
                                      "responsibility",
                                      "wrongfulness"))) %>% 
  ungroup() %>% 
  left_join(df.exp4.trialinfo %>% 
              select(-policy),
            by = "trial_label") %>% 
  # pivotality = 1 / (votes - 1) — assumes `votes` counts yes-votes so the
  # denominator is the number of other yes-voters; TODO confirm
  mutate(pivotality = 1/(votes - 1),
         rating = as.numeric(rating)) %>% 
  select(trial, everything(), -trial_label) %>% 
  arrange(participant, trial)

# format for regression 
# format for regression 
# Wide format: one column per question type.
df.exp4.regression = df.exp4.long %>%
  pivot_wider(names_from = question,
              values_from = rating)

# mean judgments 
# smean.cl.boot() returns c(mean, lower, upper), so summarize() emits three
# rows per trial which are then labeled and spread back into columns.
# NOTE(review): relying on summarize() to unpack length-3 results is
# deprecated in recent dplyr (reframe() is the replacement) — confirm the
# pinned dplyr version still supports it.
df.exp4.regression.means = df.exp4.regression %>% 
  group_by(trial, bad, votes, pivotality) %>% 
  summarize(across(.cols = badness:wrongfulness,
                   .fns = ~ smean.cl.boot(.))) %>% 
  mutate(index = c("mean", "low", "high")) %>% 
  ungroup() %>%
  pivot_wider(names_from = index,
              values_from = badness:wrongfulness) %>% 
  rename_with(.fn = ~ str_remove(., "_mean")) %>% 
  # median split on mean badness for the dummy predictor
  mutate(badness_dummy = ifelse(badness < median(badness), "neutral", "bad"))

# attach the trial-level dummy back to the participant-level data
df.exp4.regression = df.exp4.regression %>% 
  left_join(df.exp4.regression.means %>% 
              select(trial, badness_dummy),
            by = "trial")

# regression with scaled predictors
# regression with scaled predictors
# z-score each judgment within participant (scale() returns a one-column
# matrix, so these columns become matrix-valued).
df.exp4.regression.scaled = df.exp4.regression %>% 
  group_by(participant) %>% 
  mutate(across(.cols = c(badness, importance, surprise, responsibility, wrongfulness),
                .fns = ~ scale(.))) %>% 
  ungroup()

# mean scaled judgments 
# Same bootstrap-CI unpacking pattern as for the unscaled means above.
df.exp4.regression.scaled.means = df.exp4.regression.scaled %>% 
  group_by(trial, bad, votes, pivotality) %>% 
  summarize(across(.cols = badness:wrongfulness,
                   .fns = ~ smean.cl.boot(.))) %>% 
  mutate(index = c("mean", "low", "high")) %>% 
  ungroup() %>%
  pivot_wider(names_from = index,
              values_from = badness:wrongfulness) %>% 
  rename_with(.fn = ~ str_remove(., "_mean"))

6.2 Demographic data

# Experiment-4 demographics and feedback columns; gender is lowercased and
# duration/age are cast to numeric (the raw export delivers them as text).
df.exp4.demographics = df.exp4 %>% 
  select(participant, duration = duration_in_seconds, gender,
         age = age_1_text, race = race_1_text, importance, surprise) %>% 
  mutate(gender = tolower(gender),
         across(c(duration, age), ~ as.numeric(.)))

# Sample summary (duration in seconds, reported in minutes).
# NOTE(review): gender here is lowercased free text, so n_unspecified only
# counts participants who literally wrote "unspecified"; sum(gender == ...)
# also propagates NA for missing answers.
df.exp4.demographics %>% 
  summarize(age_mean = round(mean(age)),
            age_sd = round(sd(age)),
            n_total = n(),
            n_female = sum(gender == "female"),
            n_male = sum(gender == "male"),
            n_unspecified = sum(gender == "unspecified"),
            time_mean = round(mean(duration)/60, 2),
            time_sd = round(sd(duration)/60, 2)) %>% 
  print_table()
age_mean age_sd n_total n_female n_male n_unspecified time_mean time_sd
32 11 50 29 21 0 28.95 14.73

6.2.1 Participant feedback

# Interactive table of participants' free-text answers about how they
# interpreted the importance and surprise questions.
datatable(select(df.exp4.demographics, participant, importance, surprise))

6.3 Stats

6.3.1 Badness ratings

# Mean badness ratings per trial with the policy text, ordered by the
# intended badness level (`bad`).
df.exp4.regression.means %>% 
  select(trial, contains("bad")) %>% 
  left_join(select(df.exp4.trialinfo, trial, policy),
            by = "trial") %>% 
  select(trial, policy, everything()) %>% 
  arrange(bad) %>% 
  print_table()
trial policy bad badness badness_low badness_high badness_dummy
10 change the color of all government buildings to red 1 63.58 52.92 74.72 neutral
18 change the standard size of public trashcans from 33 gallons to 60 gallons 1 10.76 6.20 15.94 neutral
20 changing the font of government docs to Arial 1 13.28 7.00 19.64 neutral
22 make it mandatory to keep dogs on leash at all times in public 1 18.14 12.10 25.38 neutral
4 increase sales taxes on meat products (e.g. sausages or burgers) by 3 percent 2 40.10 32.02 48.68 neutral
6 introduce tax breaks for dog owners 2 37.18 28.68 45.92 neutral
12 legally require every citizen to get vaccinated against the flu 2 49.20 38.98 59.24 neutral
21 legalize gambling in casinos 2 25.96 18.48 33.64 neutral
8 introduce a fine of at least $200 for littering in public places 3 16.10 10.50 22.94 neutral
9 ban children who are not vaccinated against Varicella from going to school 3 45.60 35.74 55.76 neutral
13 change the legal voting age to 35 years 3 75.56 66.68 83.62 bad
16 make it easier for landlords to increase rent 3 69.80 61.92 77.28 neutral
5 Exempt all politicians from paying taxes 4 88.64 81.16 94.90 bad
14 allow all citizens over the age of 21 to purchase a gun without a license 4 73.96 65.59 81.98 bad
17 decriminalizing corruption for politicians 4 89.04 82.08 94.90 bad
24 requiring every citizen older than 12 to have their own social media profile for official identification purposes 4 81.96 73.78 89.46 bad
3 introduce prison sentences of at least 5 years for doctors who practice abortion 5 73.58 63.98 83.20 neutral
11 declare that sexual abuse within marriage does not count as a criminal liability 5 95.64 92.26 98.50 bad
15 criminalize same-sex relationships 5 82.12 72.48 90.96 bad
19 introduce prison sentences for extramarital affairs 5 75.86 67.18 83.40 bad
1 introduce corporal punishment in schools 6 83.64 76.34 90.16 bad
2 introduce the death penalty for minor crimes (e.g. theft) 6 96.40 93.48 98.68 bad
7 introducing a law that makes it illegal for people with disabilities to get a job 6 95.86 91.16 99.08 bad
23 lie to citizens of the country about an oil leak that is poisening the country’s drinking water 6 97.56 95.24 99.30 bad

6.3.2 Confirmatory analysis

6.3.2.1 Hypothesis 1: Pivotality predicts importance

The fewer politicians voted in favor of the policy, the more important a politician’s vote who voted in favor is judged to be.

# H1: importance as a function of pivotality, with random intercepts and
# slopes per participant. The fit is cached to file and re-used on reruns.
fit.brm_exp4_pivotality_importance = brm(
  importance ~ 1 + pivotality + (1 + pivotality | participant),
  data = df.exp4.regression,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp4_pivotality_importance")

# print the model summary
fit.brm_exp4_pivotality_importance
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: importance ~ 1 + pivotality + (1 + pivotality | participant) 
   Data: df.exp4.regression (Number of observations: 1200) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 50) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                34.81      3.70    28.34    42.78 1.01      368
sd(pivotality)               39.81      4.48    31.91    49.88 1.01      375
cor(Intercept,pivotality)    -0.95      0.02    -0.97    -0.91 1.01      681
                          Tail_ESS
sd(Intercept)                  926
sd(pivotality)                 919
cor(Intercept,pivotality)     1736

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     22.48      4.85    13.22    32.11 1.03      195      448
pivotality    65.68      5.72    54.32    76.67 1.03      201      581

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    16.37      0.35    15.70    17.05 1.00     4977     3331

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

6.3.2.2 Hypothesis 2: Moral badness predicts surprise

The more morally negative a policy is perceived, the more surprising a vote in favor of that policy is judged to be.

# H2: surprise as a function of perceived badness, with random intercepts
# and slopes per participant. The fit is cached to file.
fit.brm_exp4_surprise_badness = brm(
  surprise ~ 1 + badness + (1 + badness | participant),
  data = df.exp4.regression,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp4_surprise_badness")

# print the model summary
fit.brm_exp4_surprise_badness
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: surprise ~ 1 + badness + (1 + badness | participant) 
   Data: df.exp4.regression (Number of observations: 1200) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 50) 
                       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)             13.32      2.21     9.42    18.14 1.00     1348
sd(badness)                0.23      0.03     0.18     0.30 1.00      635
cor(Intercept,badness)    -0.44      0.16    -0.72    -0.09 1.00      520
                       Tail_ESS
sd(Intercept)              1865
sd(badness)                1371
cor(Intercept,badness)     1017

Population-Level Effects: 
          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    26.30      2.42    21.67    31.13 1.00     2535     2420
badness       0.36      0.04     0.28     0.43 1.00     1456     2255

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    26.17      0.57    25.07    27.29 1.00     5040     2969

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

6.3.2.3 Hypothesis 3: Importance and surprise predict responsibility

Both judgments of importance and surprise predict judgments of responsibility.

# H3: responsibility as a function of both importance and surprise, with
# random intercepts and slopes per participant. iter is doubled relative
# to the brms default. The fit is cached to file.
fit.brm_exp4_responsibility_importance_surprise = brm(
  responsibility ~ 1 + importance + surprise + (1 + importance + surprise | participant),
  data = df.exp4.regression,
  cores = 2,
  seed = 1,
  iter = 4000,
  file = "cache/fit_brm_exp4_responsibility_importance_surprise")

# print the model summary
fit.brm_exp4_responsibility_importance_surprise
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: responsibility ~ 1 + importance + surprise + (1 + importance + surprise | participant) 
   Data: df.exp4.regression (Number of observations: 1200) 
Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup samples = 8000

Group-Level Effects: 
~participant (Number of levels: 50) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                23.63      2.71    18.89    29.43 1.00     1749
sd(importance)                0.24      0.03     0.19     0.30 1.00     1934
sd(surprise)                  0.03      0.02     0.00     0.07 1.00     1293
cor(Intercept,importance)    -0.96      0.02    -0.98    -0.91 1.00     2945
cor(Intercept,surprise)       0.13      0.42    -0.74     0.87 1.00     8057
cor(importance,surprise)     -0.19      0.43    -0.88     0.74 1.00     7043
                          Tail_ESS
sd(Intercept)                 3021
sd(importance)                3293
sd(surprise)                  2814
cor(Intercept,importance)     4765
cor(Intercept,surprise)       5041
cor(importance,surprise)      5157

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     25.53      3.53    18.66    32.42 1.00     1024     2001
importance     0.64      0.04     0.57     0.72 1.00     1082     2069
surprise       0.05      0.01     0.03     0.08 1.00     7916     5532

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    12.47      0.27    11.97    13.01 1.00     8870     5956

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

6.3.2.4 Hypothesis 4: Moral badness interacts with importance and surprise to predict responsibility

When judging responsibility, importance matters more for policies that are perceived as morally neutral (compared to morally negative policies), and surprise matters more for policies that are perceived as morally negative (compared to morally neutral policies).

# H4: does perceived badness moderate the effects of importance and surprise?
# badness_dummy is recoded to a centered contrast (bad = -0.5, neutral = 0.5)
# before fitting. The fit is cached to file.
fit.brm_exp4_responsibility_badness_dummy = brm(
  formula = responsibility ~ 1 + badness_dummy * (importance + surprise) + (1 + badness_dummy * (importance + surprise) | participant),
  data = df.exp4.regression %>% 
    mutate(badness_dummy = ifelse(badness_dummy == "bad", -0.5, 0.5)),
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp4_responsibility_badness_dummy")

# print the model summary
fit.brm_exp4_responsibility_badness_dummy
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: responsibility ~ 1 + badness_dummy * (importance + surprise) + (1 + badness_dummy * (importance + surprise) | participant) 
   Data: df.exp4.regression %>% mutate(badness_dummy = ifel (Number of observations: 1200) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 50) 
                                                     Estimate Est.Error
sd(Intercept)                                           23.44      2.52
sd(badness_dummy)                                        4.96      2.64
sd(importance)                                           0.24      0.03
sd(surprise)                                             0.03      0.02
sd(badness_dummy:importance)                             0.04      0.03
sd(badness_dummy:surprise)                               0.03      0.03
cor(Intercept,badness_dummy)                            -0.35      0.24
cor(Intercept,importance)                               -0.95      0.02
cor(badness_dummy,importance)                            0.28      0.25
cor(Intercept,surprise)                                 -0.03      0.31
cor(badness_dummy,surprise)                              0.03      0.36
cor(importance,surprise)                                -0.07      0.32
cor(Intercept,badness_dummy:importance)                  0.09      0.34
cor(badness_dummy,badness_dummy:importance)             -0.37      0.42
cor(importance,badness_dummy:importance)                -0.11      0.33
cor(surprise,badness_dummy:importance)                   0.09      0.37
cor(Intercept,badness_dummy:surprise)                    0.02      0.35
cor(badness_dummy,badness_dummy:surprise)               -0.11      0.38
cor(importance,badness_dummy:surprise)                  -0.05      0.35
cor(surprise,badness_dummy:surprise)                    -0.02      0.37
cor(badness_dummy:importance,badness_dummy:surprise)    -0.15      0.39
                                                     l-95% CI u-95% CI Rhat
sd(Intercept)                                           18.98    28.80 1.00
sd(badness_dummy)                                        0.74    11.37 1.01
sd(importance)                                           0.19     0.30 1.00
sd(surprise)                                             0.00     0.08 1.00
sd(badness_dummy:importance)                             0.00     0.12 1.01
sd(badness_dummy:surprise)                               0.00     0.10 1.00
cor(Intercept,badness_dummy)                            -0.78     0.18 1.00
cor(Intercept,importance)                               -0.98    -0.89 1.00
cor(badness_dummy,importance)                           -0.26     0.74 1.00
cor(Intercept,surprise)                                 -0.62     0.59 1.00
cor(badness_dummy,surprise)                             -0.65     0.68 1.00
cor(importance,surprise)                                -0.66     0.58 1.00
cor(Intercept,badness_dummy:importance)                 -0.61     0.67 1.00
cor(badness_dummy,badness_dummy:importance)             -0.94     0.55 1.01
cor(importance,badness_dummy:importance)                -0.71     0.58 1.00
cor(surprise,badness_dummy:importance)                  -0.63     0.76 1.00
cor(Intercept,badness_dummy:surprise)                   -0.66     0.68 1.00
cor(badness_dummy,badness_dummy:surprise)               -0.77     0.64 1.00
cor(importance,badness_dummy:surprise)                  -0.70     0.64 1.00
cor(surprise,badness_dummy:surprise)                    -0.72     0.69 1.00
cor(badness_dummy:importance,badness_dummy:surprise)    -0.82     0.65 1.00
                                                     Bulk_ESS Tail_ESS
sd(Intercept)                                             926     1755
sd(badness_dummy)                                         581      896
sd(importance)                                           1020     1909
sd(surprise)                                              683     1496
sd(badness_dummy:importance)                              676     1225
sd(badness_dummy:surprise)                                979     1455
cor(Intercept,badness_dummy)                             3763     2753
cor(Intercept,importance)                                1329     2371
cor(badness_dummy,importance)                            1489     2133
cor(Intercept,surprise)                                  5268     2934
cor(badness_dummy,surprise)                              2672     2913
cor(importance,surprise)                                 4546     3028
cor(Intercept,badness_dummy:importance)                  3422     2949
cor(badness_dummy,badness_dummy:importance)               985     1501
cor(importance,badness_dummy:importance)                 2604     2835
cor(surprise,badness_dummy:importance)                   2777     3065
cor(Intercept,badness_dummy:surprise)                    4951     3049
cor(badness_dummy,badness_dummy:surprise)                4387     3323
cor(importance,badness_dummy:surprise)                   5063     3008
cor(surprise,badness_dummy:surprise)                     2807     2891
cor(badness_dummy:importance,badness_dummy:surprise)     2353     3199

Population-Level Effects: 
                         Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
Intercept                   26.45      3.59    19.45    33.52 1.01      507
badness_dummy               -7.92      2.09   -12.12    -4.07 1.00     2058
importance                   0.64      0.04     0.56     0.71 1.01      581
surprise                     0.05      0.02     0.02     0.08 1.00     4694
badness_dummy:importance     0.04      0.03    -0.01     0.10 1.00     2387
badness_dummy:surprise       0.08      0.03     0.02     0.13 1.00     4365
                         Tail_ESS
Intercept                     900
badness_dummy                1263
importance                    995
surprise                     3050
badness_dummy:importance     1523
badness_dummy:surprise       3280

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    12.20      0.28    11.64    12.74 1.00     2496     1978

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

6.3.2.5 Hypothesis 5: Importance matters more for responsibility compared to wrongness

Perceived importance matters more for judgments of responsibility compared to wrongness judgments. Note that participants who gave the same response for one of these questions across all trials are removed from this analysis (because of z-scoring on the individual participant level).

# H5: compare how much importance matters for responsibility vs. wrongness,
# on ratings z-scored within participant.

# responsibility (scaled)
fit.brm_exp4_responsibility_importance_surprise_scaled = brm(
  responsibility ~ 1 + importance + surprise + (1 + importance + surprise | participant),
  data = df.exp4.regression.scaled,
  cores = 2,
  seed = 1,
  file = "cache/fit_brm_exp4_responsibility_importance_surprise_scaled")

fit.brm_exp4_responsibility_importance_surprise_scaled

# wrongfulness (scaled); this model uses more iterations and a higher
# adapt_delta than the responsibility model above
fit.brm_exp4_wrongfulness_importance_surprise_scaled = brm(
  wrongfulness ~ 1 + importance + surprise + (1 + importance + surprise | participant),
  data = df.exp4.regression.scaled,
  cores = 2,
  seed = 1,
  iter = 8000,
  control = list(adapt_delta = 0.99),
  file = "cache/fit_brm_exp4_wrongfulness_importance_surprise_scaled")

fit.brm_exp4_wrongfulness_importance_surprise_scaled
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: responsibility ~ 1 + importance + surprise + (1 + importance + surprise | participant) 
   Data: df.exp4.regression.scaled (Number of observations: 1152) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~participant (Number of levels: 48) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 0.02      0.01     0.00     0.04 1.00     3123
sd(importance)                0.26      0.03     0.20     0.33 1.00     1688
sd(surprise)                  0.06      0.03     0.01     0.12 1.01     1097
cor(Intercept,importance)     0.04      0.51    -0.87     0.89 1.04       78
cor(Intercept,surprise)      -0.02      0.50    -0.89     0.86 1.02      219
cor(importance,surprise)     -0.54      0.31    -0.94     0.24 1.00     3270
                          Tail_ESS
sd(Intercept)                 1594
sd(importance)                2122
sd(surprise)                  1301
cor(Intercept,importance)      345
cor(Intercept,surprise)       1250
cor(importance,surprise)      2811

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept      0.00      0.02    -0.04     0.04 1.00     7600     2711
importance     0.72      0.04     0.63     0.80 1.00     2182     2768
surprise       0.09      0.02     0.04     0.13 1.00     4461     2640

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.62      0.01     0.59     0.64 1.00     7085     2672

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: wrongfulness ~ 1 + importance + surprise + (1 + importance + surprise | participant) 
   Data: df.exp4.regression.scaled (Number of observations: 1176) 
Samples: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
         total post-warmup samples = 16000

Group-Level Effects: 
~participant (Number of levels: 49) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 0.02      0.02     0.00     0.06 1.00    13669
sd(importance)                0.21      0.04     0.13     0.30 1.00     7239
sd(surprise)                  0.31      0.04     0.23     0.40 1.00     6391
cor(Intercept,importance)     0.03      0.50    -0.87     0.87 1.00      590
cor(Intercept,surprise)      -0.04      0.50    -0.90     0.87 1.01      372
cor(importance,surprise)     -0.50      0.17    -0.79    -0.12 1.00     4771
                          Tail_ESS
sd(Intercept)                 7712
sd(importance)                9241
sd(surprise)                 11164
cor(Intercept,importance)     1932
cor(Intercept,surprise)       1264
cor(importance,surprise)      8988

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     -0.00      0.02    -0.05     0.05 1.00    21414    11097
importance    -0.06      0.04    -0.15     0.01 1.00     8472    10237
surprise       0.43      0.05     0.33     0.53 1.00     7789    10403

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.83      0.02     0.80     0.87 1.00    19265    11926

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

6.3.2.6 Model checks

# Model checks: trace plots and marginal posterior densities for each fit.
# N = number of parameters shown per page; ask = FALSE prints all pages
# without prompting. (Replaced the reassignable aliases T/F with TRUE/FALSE.)
fit.brm_exp4_pivotality_importance %>% 
  plot(N = 6,
       ask = FALSE)

fit.brm_exp4_surprise_badness %>% 
  plot(N = 6,
       ask = FALSE)

fit.brm_exp4_responsibility_importance_surprise %>% 
  plot(N = 5,
       ask = FALSE)

fit.brm_exp4_responsibility_badness_dummy %>% 
  plot(N = 5,
       ask = FALSE)

fit.brm_exp4_responsibility_importance_surprise_scaled %>% 
  plot(N = 5,
       ask = FALSE)

fit.brm_exp4_wrongfulness_importance_surprise_scaled %>% 
  plot(N = 5,
       ask = FALSE)

6.3.3 Exploratory analysis

6.3.3.1 Responsibility: the role of surprise and importance

# Exploratory analysis: which predictors (importance, surprise, or both)
# best account for responsibility judgments? Fit the two single-predictor
# models (the joint model was fit above), add LOO to each, then compare.

fit.brm_exp4_responsibility_importance = brm(
  formula = responsibility ~ 1 + importance + (1 + importance | participant),
  data = df.exp4.regression,
  seed = 1,
  cores = 2,
  iter = 4000,
  file = "cache/fit_brm_exp4_responsibility_importance")

fit.brm_exp4_responsibility_surprise = brm(
  formula = responsibility ~ 1 + surprise + (1 + surprise | participant),
  data = df.exp4.regression,
  seed = 1,
  cores = 2,
  iter = 4000,
  file = "cache/fit_brm_exp4_responsibility_surprise")

# add the LOO criterion to each fit; reloo = TRUE refits the model for
# observations with unreliable Pareto-k estimates (TRUE/FALSE instead of
# the reassignable aliases T/F)
fit.brm_exp4_responsibility_importance = add_criterion(
  fit.brm_exp4_responsibility_importance,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_responsibility_importance")

fit.brm_exp4_responsibility_surprise = add_criterion(
  fit.brm_exp4_responsibility_surprise,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_responsibility_surprise")

fit.brm_exp4_responsibility_importance_surprise = add_criterion(
  fit.brm_exp4_responsibility_importance_surprise,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_responsibility_importance_surprise")

# compare the three candidate models via LOO and model weights
loo_compare(fit.brm_exp4_responsibility_importance,
            fit.brm_exp4_responsibility_surprise,
            fit.brm_exp4_responsibility_importance_surprise)

model_weights(fit.brm_exp4_responsibility_importance,
              fit.brm_exp4_responsibility_surprise,
              fit.brm_exp4_responsibility_importance_surprise)

6.3.3.2 Wrongfulness: the role of surprise and importance

# Exploratory analysis: which predictors (importance, surprise, or both)
# best account for wrongfulness judgments? Fit the three candidate models,
# add LOO to each, then compare.

fit.brm_exp4_wrongfulness_importance = brm(
  formula = wrongfulness ~ 1 + importance + (1 + importance | participant),
  data = df.exp4.regression,
  seed = 1,
  cores = 2,
  iter = 4000,
  file = "cache/fit_brm_exp4_wrongfulness_importance")

fit.brm_exp4_wrongfulness_surprise = brm(
  formula = wrongfulness ~ 1 + surprise + (1 + surprise | participant),
  data = df.exp4.regression,
  seed = 1,
  cores = 2,
  iter = 4000,
  file = "cache/fit_brm_exp4_wrongfulness_surprise")

fit.brm_exp4_wrongfulness_importance_surprise = brm(
  formula = wrongfulness ~ 1 + importance + surprise + (1 + importance + surprise | participant),
  data = df.exp4.regression,
  seed = 1,
  cores = 2,
  iter = 4000,
  file = "cache/fit_brm_exp4_wrongfulness_importance_surprise")

# add the LOO criterion to each fit; reloo = TRUE refits the model for
# observations with unreliable Pareto-k estimates (TRUE/FALSE instead of
# the reassignable aliases T/F)
fit.brm_exp4_wrongfulness_importance = add_criterion(
  fit.brm_exp4_wrongfulness_importance,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_wrongfulness_importance")

fit.brm_exp4_wrongfulness_surprise = add_criterion(
  fit.brm_exp4_wrongfulness_surprise,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_wrongfulness_surprise")

fit.brm_exp4_wrongfulness_importance_surprise = add_criterion(
  fit.brm_exp4_wrongfulness_importance_surprise,
  criterion = "loo",
  reloo = TRUE,
  file = "cache/fit_brm_exp4_wrongfulness_importance_surprise")

# compare the three candidate models via LOO and model weights
loo_compare(fit.brm_exp4_wrongfulness_importance,
            fit.brm_exp4_wrongfulness_surprise,
            fit.brm_exp4_wrongfulness_importance_surprise)

model_weights(fit.brm_exp4_wrongfulness_importance,
              fit.brm_exp4_wrongfulness_surprise,
              fit.brm_exp4_wrongfulness_importance_surprise)

6.3.3.3 Individual participants analysis

6.3.3.3.1 Individual participant regressions

We remove participant 45 from the individual regression analysis because this participant gave a 0 surprise rating for all scenarios (leading to problems with model fitting).

# Per-participant SDs and means of the key rating variables (used to spot
# participants with no response variance).
df.exp4.regression %>% 
  group_by(participant) %>% 
  summarize(across(.cols = c(wrongfulness, importance, surprise),
                   .fns = list(sd = sd, mean = mean)))
# A tibble: 50 x 7
   participant wrongfulness_sd wrongfulness_me… importance_sd importance_mean
 *       <int>           <dbl>            <dbl>         <dbl>           <dbl>
 1           1            44.9             51.7          29.0            56.5
 2           2            41.9             61.8          39.8            42.8
 3           3            37.8             65.0          15.1            84.4
 4           4            47.2             64.0          38.3            41.5
 5           5            40.3             37.2          15.7            68.9
 6           6            47.4             55.4          36.4            41.8
 7           7            44.2             61.7          11.0            97.8
 8           8            43.0             62.2          26.5            71.9
 9           9            40.0             68.2          17.2            84.5
10          10            38.4             61.2          28.7            61.7
# … with 40 more rows, and 2 more variables: surprise_sd <dbl>,
#   surprise_mean <dbl>
6.3.3.3.1.1 Responsibility

This code chunk takes a long time to compute but needs to be run once to generate the .RData file with individual data fits. The file was larger than 100mb so we couldn’t upload it to github.

# participant 45 is removed from this analysis because they gave a 0 surprise rating
# across all trials  
df.fit = df.exp4.regression %>% 
  filter(participant != 45)

# restrict priors to be positive 
priors = prior(normal(0, 10), class = b, lb = 0)

# initial model fits (used for compilation)
fit.brm_exp4_responsibility_baseline_individual = 
  brm(formula = responsibility ~ 1,
      data = df.fit %>% 
        filter(participant == 1),
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = T),
      file = str_c("cache/fit_brm_exp4_responsibility_baseline_individual"))

fit.brm_exp4_responsibility_importance_individual = 
  brm(formula = responsibility ~ 1 + importance,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = T),
      file = str_c("cache/fit_brm_exp4_responsibility_importance_individual"))

fit.brm_exp4_responsibility_surprise_individual = 
  brm(formula = responsibility ~ 1 + surprise,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = T),
      file = str_c("cache/fit_brm_exp4_responsibility_surprise_individual"))

fit.brm_exp4_responsibility_importance_surprise_individual = 
  brm(formula = responsibility ~ 1 + importance + surprise,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = T),
      file = str_c("cache/fit_brm_exp4_responsibility_importance_surprise_individual"))

# update model fits for all participants
df.exp4.responsibility_individual_fit = df.fit %>%
  group_by(participant) %>%
  nest() %>%
  ungroup() %>% 
  # fit model for each participant
  mutate(fit_baseline = map(
    .x = data,
    .f = ~ update(fit.brm_exp4_responsibility_baseline_individual,
                  newdata = .x,
                  seed = 1)),
    fit_importance = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_responsibility_importance_individual,
                    newdata = .x,
                    seed = 1)),
    fit_surprise = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_responsibility_surprise_individual,
                    newdata = .x,
                    seed = 1)),
    fit_importance_surprise = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_responsibility_importance_surprise_individual,
                    newdata = .x,
                    seed = 1))) %>% 
  mutate(fit_baseline = map(fit_baseline, ~ add_criterion(.,
                                                          criterion = "loo",
                                                          moment_match = T)),
         fit_importance = map(fit_importance, ~ add_criterion(.,
                                                              criterion = "loo",
                                                              moment_match = T)),
         fit_surprise = map(fit_surprise, ~ add_criterion(.,
                                                          criterion = "loo",
                                                          moment_match = T)),
         fit_importance_surprise = map(fit_importance_surprise,
                                       ~ add_criterion(.,
                                                       criterion = "loo",
                                                       moment_match = T)),
         r_importance = map2_dbl(.x = data,
                                 .y = fit_importance,
                                 .f = ~ cor(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         r_surprise = map2_dbl(.x = data,
                                 .y = fit_surprise,
                                 .f = ~ cor(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         r_importance_surprise = map2_dbl(.x = data,
                                 .y = fit_importance_surprise,
                                 .f = ~ cor(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_baseline = map2_dbl(.x = data,
                                 .y = fit_baseline,
                                 .f = ~ rmse(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_importance = map2_dbl(.x = data,
                                 .y = fit_importance,
                                 .f = ~ rmse(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_surprise = map2_dbl(.x = data,
                                 .y = fit_surprise,
                                 .f = ~ rmse(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_importance_surprise = map2_dbl(.x = data,
                                 .y = fit_importance_surprise,
                                 .f = ~ rmse(.x$responsibility, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         model_comparison = pmap(.l = list(baseline = fit_baseline,
                                           importance = fit_importance,
                                           surprise = fit_surprise,
                                           importance_surprise = fit_importance_surprise),
                                 .f = ~ loo_compare(..1, ..2, ..3, ..4)),
         best_model = map_chr(.x = model_comparison,
                              .f = ~ rownames(.) %>% .[1]),
         best_model = factor(best_model,
                             levels = c("..1", "..2", "..3", "..4"),
                             labels = c("baseline",
                                        "importance",
                                        "surprise",
                                        "importance_surprise")))

# Persist the per-participant responsibility model fits so later chunks can
# load() them instead of refitting (the file is too large for the github repo,
# so it has to be regenerated locally when missing).
save(list = c("df.exp4.responsibility_individual_fit"),
     file = "data/exp4_responsibility_individual_fit.RData")
6.3.3.3.1.2 Wrongfulness

This code chunk takes a long time to compute, but only needs to be run once to generate the .RData file with the individual data fits. The file was larger than 100 MB, so we couldn't upload it to GitHub.

# Participant 45 is removed from this analysis because they gave a 0 surprise
# rating across all trials (a constant predictor cannot inform the
# surprise regressions below).
df.fit = df.exp4.regression %>% 
  filter(participant != 45)

# restrict the regression slopes to be positive (normal prior truncated at 0)
priors = prior(normal(0, 10), class = b, lb = 0)

# Initial intercept-only model, fit to participant 1's data. This compiled
# model is later update()d with every participant's data so the Stan model
# is compiled only once. Cached to file, so reruns load from disk.
fit.brm_exp4_wrongfulness_baseline_individual = 
  brm(formula = wrongfulness ~ 1,
      data = df.fit %>% 
        filter(participant == 1),
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = TRUE),
      file = "cache/fit_brm_exp4_wrongfulness_baseline_individual")

# Importance-only model (positive-slope prior), fit to participant 1 for
# compilation; updated per participant below. Cached to file.
fit.brm_exp4_wrongfulness_importance_individual = 
  brm(formula = wrongfulness ~ 1 + importance,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = TRUE),
      file = "cache/fit_brm_exp4_wrongfulness_importance_individual")

# Surprise-only model (positive-slope prior), fit to participant 1 for
# compilation; updated per participant below. Cached to file.
fit.brm_exp4_wrongfulness_surprise_individual = 
  brm(formula = wrongfulness ~ 1 + surprise,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = TRUE),
      file = "cache/fit_brm_exp4_wrongfulness_surprise_individual")

# Full model with both predictors (positive-slope priors), fit to
# participant 1 for compilation; updated per participant below. Cached to file.
fit.brm_exp4_wrongfulness_importance_surprise_individual = 
  brm(formula = wrongfulness ~ 1 + importance + surprise,
      data = df.fit %>% 
        filter(participant == 1),
      prior = priors,
      cores = 4,
      chains = 4,
      iter = 4000,
      seed = 1,
      control = list(adapt_delta = 0.95),
      save_pars = save_pars(all = TRUE),
      file = "cache/fit_brm_exp4_wrongfulness_importance_surprise_individual")

# Update the compiled models with each participant's data, add the loo
# criterion, and determine the best-fitting model per participant via
# approximate leave-one-out cross-validation.
df.exp4.wrongfulness_individual_fit = df.fit %>%
  group_by(participant) %>%
  nest() %>%
  ungroup() %>% 
  # fit each of the four models to each participant's data; update() reuses
  # the already-compiled Stan models, so no recompilation is needed
  mutate(fit_baseline = map(
    .x = data,
    .f = ~ update(fit.brm_exp4_wrongfulness_baseline_individual,
                  newdata = .x,
                  seed = 1)),
    fit_importance = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_wrongfulness_importance_individual,
                    newdata = .x,
                    seed = 1)),
    fit_surprise = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_wrongfulness_surprise_individual,
                    newdata = .x,
                    seed = 1)),
    fit_importance_surprise = map(
      .x = data,
      .f = ~ update(fit.brm_exp4_wrongfulness_importance_surprise_individual,
                    newdata = .x,
                    seed = 1))) %>% 
  # attach the loo criterion to each fit (moment matching improves the
  # approximation for observations with problematic Pareto-k diagnostics)
  mutate(fit_baseline = map(fit_baseline, ~ add_criterion(.,
                                                          criterion = "loo",
                                                          moment_match = TRUE)),
         fit_importance = map(fit_importance, ~ add_criterion(.,
                                                              criterion = "loo",
                                                              moment_match = TRUE)),
         fit_surprise = map(fit_surprise, ~ add_criterion(.,
                                                          criterion = "loo",
                                                          moment_match = TRUE)),
         fit_importance_surprise = map(fit_importance_surprise,
                                       ~ add_criterion(.,
                                                       criterion = "loo",
                                                       moment_match = TRUE)),
         # correlation between observed ratings and posterior-mean predictions
         # (no r for the baseline: an intercept-only fit is constant)
         r_importance = map2_dbl(.x = data,
                                 .y = fit_importance,
                                 .f = ~ cor(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         r_surprise = map2_dbl(.x = data,
                                 .y = fit_surprise,
                                 .f = ~ cor(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         r_importance_surprise = map2_dbl(.x = data,
                                 .y = fit_importance_surprise,
                                 .f = ~ cor(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         # root mean squared error between observed and predicted ratings
         rmse_baseline = map2_dbl(.x = data,
                                 .y = fit_baseline,
                                 .f = ~ rmse(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_importance = map2_dbl(.x = data,
                                 .y = fit_importance,
                                 .f = ~ rmse(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_surprise = map2_dbl(.x = data,
                                 .y = fit_surprise,
                                 .f = ~ rmse(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         rmse_importance_surprise = map2_dbl(.x = data,
                                 .y = fit_importance_surprise,
                                 .f = ~ rmse(.x$wrongfulness, .y %>% 
                                              fitted() %>% 
                                              .[, 1])),
         # loo_compare() sorts models from best to worst; its row names are
         # the pmap placeholder names ("..1" to "..4"), hence the recoding
         # of best_model into readable labels below
         model_comparison = pmap(.l = list(baseline = fit_baseline,
                                           importance = fit_importance,
                                           surprise = fit_surprise,
                                           importance_surprise = fit_importance_surprise),
                                 .f = ~ loo_compare(..1, ..2, ..3, ..4)),
         best_model = map_chr(.x = model_comparison,
                              .f = ~ rownames(.) %>% .[1]),
         best_model = factor(best_model,
                             levels = c("..1", "..2", "..3", "..4"),
                             labels = c("baseline",
                                        "importance",
                                        "surprise",
                                        "importance_surprise")))

# Persist the per-participant wrongfulness model fits so later chunks can
# load() them instead of refitting (the file is too large for the github repo,
# so it has to be regenerated locally when missing).
save(list = c("df.exp4.wrongfulness_individual_fit"),
     file = "data/exp4_wrongfulness_individual_fit.RData")
6.3.3.3.2 Pairwise correlations between measures
# Pairwise correlations between measures, computed separately for each
# participant (one row per participant). cor() yields NA whenever a
# participant's ratings on a measure are constant.
df.exp4.individual.correlations = df.exp4.long %>%
  select(-bad, -votes) %>%
  pivot_wider(names_from = question,
              values_from = rating) %>%
  group_by(participant) %>%
  summarize(r_pivotality_importance = cor(pivotality, importance),
            r_badness_surprise = cor(badness, surprise),
            r_importance_responsibility = cor(importance, responsibility),
            r_surprise_responsibility = cor(surprise, responsibility),
            r_importance_wrongfulness = cor(importance, wrongfulness),
            r_surprise_wrongfulness = cor(surprise, wrongfulness),
            .groups = "drop")

6.4 Tables

6.4.1 Bayesian regression models

6.4.1.1 1. Pivotality predicts importance

# Latex regression table: pivotality predicting importance
fit.brm_exp4_pivotality_importance %>% 
  fun_brms_table(format = "latex")
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:02 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{lrrrr}
  \toprule
term & estimate & std.error & lower 95\% HDI & upper 95\% HDI \\ 
  \midrule
intercept & 22.48 & 4.85 & 13.22 & 32.11 \\ 
  pivotality & 65.68 & 5.72 & 54.32 & 76.67 \\ 
   \bottomrule
\end{tabular}
\end{table}

6.4.1.2 2. Badness predicts surprise

# Latex regression table: badness predicting surprise
fit.brm_exp4_surprise_badness %>% 
  fun_brms_table(format = "latex")
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:02 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{lrrrr}
  \toprule
term & estimate & std.error & lower 95\% HDI & upper 95\% HDI \\ 
  \midrule
intercept & 26.30 & 2.42 & 21.67 & 31.13 \\ 
  badness & 0.36 & 0.04 & 0.28 & 0.43 \\ 
   \bottomrule
\end{tabular}
\end{table}

6.4.1.3 3. Importance and surprise predict responsibility

# Latex regression table: importance and surprise predicting responsibility
fit.brm_exp4_responsibility_importance_surprise %>% 
  fun_brms_table(format = "latex")
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:02 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{lrrrr}
  \toprule
term & estimate & std.error & lower 95\% HDI & upper 95\% HDI \\ 
  \midrule
intercept & 25.53 & 3.53 & 18.66 & 32.42 \\ 
  importance & 0.64 & 0.04 & 0.57 & 0.72 \\ 
  surprise & 0.05 & 0.01 & 0.03 & 0.08 \\ 
   \bottomrule
\end{tabular}
\end{table}

6.4.1.4 4. Moral badness interacts with importance and surprise to predict responsibility

# Latex regression table: badness dummy interacting with importance and
# surprise (tidy() warns because the term names contain underscores)
fit.brm_exp4_responsibility_badness_dummy %>% 
  fun_brms_table(format = "latex")
Warning in tidy.brmsfit(.): some parameter names contain underscores: term
naming may be unreliable!
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:02 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{lrrrr}
  \toprule
term & estimate & std.error & lower 95\% HDI & upper 95\% HDI \\ 
  \midrule
intercept & 26.45 & 3.59 & 19.45 & 33.52 \\ 
  badness\_dummy & -7.92 & 2.09 & -12.12 & -4.07 \\ 
  importance & 0.64 & 0.04 & 0.56 & 0.71 \\ 
  surprise & 0.05 & 0.02 & 0.02 & 0.08 \\ 
  badness\_dummy:importance & 0.04 & 0.03 & -0.01 & 0.10 \\ 
  badness\_dummy:surprise & 0.08 & 0.03 & 0.02 & 0.13 \\ 
   \bottomrule
\end{tabular}
\end{table}

6.4.1.5 5. Importance matters more for responsibility

# Side-by-side standardized coefficients for responsibility vs. wrongfulness:
# stack the fixed effects of the two scaled models, drop intercepts, print as
# latex. The "\\%" in the renamed columns is pre-escaped for latex, since
# print_table() sets sanitize.colnames.function = identity.
fit.brm_exp4_responsibility_importance_surprise_scaled %>% 
    tidy() %>% 
    filter(effect == "fixed") %>% 
    select(term:conf.high) %>% 
    rename(`lower 95\\% HDI` = conf.low,
           `upper 95\\% HDI` = conf.high) %>% 
  mutate(response = "responsibility") %>% 
  bind_rows(
    fit.brm_exp4_wrongfulness_importance_surprise_scaled %>% 
    tidy() %>% 
    filter(effect == "fixed") %>% 
    select(term:conf.high) %>% 
    rename(`lower 95\\% HDI` = conf.low,
           `upper 95\\% HDI` = conf.high) %>% 
  mutate(response = "wrongfulness")) %>% 
  relocate(response) %>% 
  filter(term != "(Intercept)") %>% 
  print_table("latex")
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:02 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{llrrrr}
  \toprule
response & term & estimate & std.error & lower 95\% HDI & upper 95\% HDI \\ 
  \midrule
responsibility & importance & 0.72 & 0.04 & 0.63 & 0.80 \\ 
  responsibility & surprise & 0.09 & 0.02 & 0.04 & 0.13 \\ 
  wrongfulness & importance & -0.06 & 0.04 & -0.15 & 0.01 \\ 
  wrongfulness & surprise & 0.43 & 0.05 & 0.33 & 0.53 \\ 
   \bottomrule
\end{tabular}
\end{table}

6.4.2 Correlations

# Correlations between the scenario-level mean ratings. shave() blanks the
# upper triangle and fashion() formats the values for printing.
df.exp4.long %>% 
  # bad and pivotality appear to be constant within trial — grouping by them
  # keeps these columns available after summarizing (TODO confirm)
  group_by(question, trial, bad, pivotality) %>% 
  summarize(mean = mean(rating)) %>% 
  ungroup() %>% 
  pivot_wider(names_from = question,
              values_from = mean) %>% 
  select(-c(trial, bad)) %>% 
  relocate(responsibility, wrongfulness, surprise, importance, badness, pivotality) %>% 
  correlate() %>% 
  shave() %>% 
  fashion() %>% 
  as_tibble() %>% 
  select(-pivotality) %>% 
  filter(term != "responsibility") %>% 
  print_table()
term responsibility wrongfulness surprise importance badness
wrongfulness .04
surprise .41 .78
importance .99 -.03 .35
badness .11 .96 .83 .04
pivotality .96 -.18 .21 .97 -.10

6.4.3 Individual participant correlations

# Distribution of per-participant correlations: median and 5th/95th
# percentiles for each pair of measures.
df.exp4.individual.correlations %>% 
  pivot_longer(cols = -participant,
               names_to = "index",
               values_to = "correlation") %>% 
  # str_replace() only replaces the FIRST match, so "r_a_b" -> "r(a, b)"
  mutate(index = str_replace(index, "r_", "r("),
         index = str_replace(index, "_", ", "),
         index = str_c(index, ")"),
         index = factor(index, levels = 
                          c("r(pivotality, importance)",
                            "r(badness, surprise)",
                            "r(surprise, responsibility)",
                            "r(importance, responsibility)",
                            "r(surprise, wrongfulness)",
                            "r(importance, wrongfulness)"))) %>% 
  # drop participants whose constant ratings produced NA correlations
  drop_na() %>% 
  group_by(index) %>% 
  summarize(median = median(correlation),
            `5 percentile` = quantile(correlation, 0.05),
            `95 percentile` = quantile(correlation, 0.95)) %>% 
  ungroup() %>% 
  mutate(across(-index, ~ round(., 2))) %>% 
  print_table()
index median 5 percentile 95 percentile
r(pivotality, importance) 0.85 -0.06 0.97
r(badness, surprise) 0.46 -0.12 0.76
r(surprise, responsibility) 0.19 -0.13 0.73
r(importance, responsibility) 0.83 0.14 0.98
r(surprise, wrongfulness) 0.45 -0.26 0.84
r(importance, wrongfulness) 0.00 -0.29 0.37

6.4.4 Individual participant model comparison

# NOTE: these two .RData files are not in the github repo (too large).
# They need to be generated first by running the
# `Individual participant model comparison` code chunks above.
load("data/exp4_responsibility_individual_fit.RData")
load("data/exp4_wrongfulness_individual_fit.RData")

# RESPONSIBILITY 
# Per-model summary: how many participants were best fit by each model (n),
# plus the median [25th, 75th percentile] of r and rmse among exactly those
# participants for whom that model won the loo comparison.
df.exp4.models.responsibility = tibble(model = c("baseline",
                             "importance",
                             "surprise",
                             "importance_surprise"),
                   formula = c("responsibility ~ 1",
                               "responsibility ~ 1 + importance",
                               "responsibility ~ 1 + surprise",
                               "responsibility ~ 1 + importance + surprise")) %>% 
  left_join(df.exp4.responsibility_individual_fit %>% 
              count(best_model),
            by = c("model" =  "best_model")) %>% 
  left_join(df.exp4.responsibility_individual_fit %>% 
              select(where(~ !is.list(.))) %>% 
              # the baseline has no r column; define it as 0 for the table
              mutate(r_baseline = 0) %>% 
              # split e.g. "rmse_importance_surprise" into measure ("rmse")
              # and predictor ("importance_surprise") at the first underscore
              pivot_longer(cols = -c(participant, best_model),
                           names_to = c("measure", "predictor"),
                           names_pattern = "(^[^_]+(?=_))_(.*)",
                           values_to = "value") %>% 
              filter(best_model == predictor) %>%
              group_by(best_model, measure) %>% 
              # NOTE(review): multi-row summarize() plus the recycled index
              # relies on quantile() returning values in probs order — it
              # does, but reframe()/named output would be more robust
              summarize(quantile = quantile(value,
                                            probs = c(0.25, 0.5, 0.75),
                                            na.rm = T)) %>% 
              mutate(index = c(0.25, 0.5, 0.75)) %>% 
              ungroup() %>% 
              pivot_wider(names_from = index,
                          values_from = quantile) %>% 
              mutate(across(where(is.numeric), ~ round(., 2))) %>% 
              mutate(value = str_c(`0.5`, " [", `0.25`, ", ", `0.75`, "]")) %>% 
              select(best_model, measure, value) %>% 
              pivot_wider(names_from = measure,
                          values_from = value),
            by = c("model" =  "best_model"))

# WRONGFULNESS 
# Same summary as for responsibility: n of participants best fit by each
# model, plus median [interval] of r and rmse among those participants.
# NOTE(review): this table uses 5th/95th percentiles while the responsibility
# table above uses 25th/75th — the two are combined into one table below
# without labeling the different interval widths; confirm this is intended.
df.exp4.models.wrongfulness = tibble(model = c("baseline",
                             "importance",
                             "surprise",
                             "importance_surprise"),
                   formula = c("wrongfulness ~ 1",
                               "wrongfulness ~ 1 + importance",
                               "wrongfulness ~ 1 + surprise",
                               "wrongfulness ~ 1 + importance + surprise")) %>% 
  left_join(df.exp4.wrongfulness_individual_fit %>% 
              count(best_model),
            by = c("model" =  "best_model")) %>% 
  left_join(df.exp4.wrongfulness_individual_fit %>% 
              select(where(~ !is.list(.))) %>% 
              # the baseline has no r column; define it as 0 for the table
              mutate(r_baseline = 0) %>% 
              # split names like "rmse_importance_surprise" at the first "_"
              pivot_longer(cols = -c(participant, best_model),
                           names_to = c("measure", "predictor"),
                           names_pattern = "(^[^_]+(?=_))_(.*)",
                           values_to = "value") %>% 
              filter(best_model == predictor) %>%
              group_by(best_model, measure) %>% 
              summarize(quantile = quantile(value,
                                            probs = c(0.05, 0.5, 0.95),
                                            na.rm = T)) %>% 
              mutate(index = c(0.05, 0.5, 0.95)) %>% 
              ungroup() %>% 
              pivot_wider(names_from = index,
                          values_from = quantile) %>% 
              mutate(across(where(is.numeric), ~ round(., 2))) %>% 
              mutate(value = str_c(`0.5`, " [", `0.05`, ", ", `0.95`, "]")) %>% 
              select(best_model, measure, value) %>% 
              pivot_wider(names_from = measure,
                          values_from = value),
            by = c("model" =  "best_model"))

# Combined latex table of the responsibility and wrongfulness comparisons
df.exp4.models.responsibility %>% 
  bind_rows(df.exp4.models.wrongfulness) %>% 
  # relocate(formula) %>% 
  print_table("latex")
% latex table generated in R 4.0.3 by xtable 1.8-4 package
% Wed Jul  7 09:30:06 2021
\begin{table}[ht]
\centering
\caption{Caption} 
\label{tab:table}
\begin{tabular}{llrll}
  \toprule
model & formula & n & r & rmse \\ 
  \midrule
baseline & responsibility \~{} 1 &   5 & 0 [0, 0] & 52.47 [49.78, 57.8] \\ 
  importance & responsibility \~{} 1 + importance &  31 & 0.86 [0.64, 0.92] & 9.87 [7.89, 13.22] \\ 
  surprise & responsibility \~{} 1 + surprise &   2 & 0.45 [0.35, 0.54] & 9.8 [9.03, 10.57] \\ 
  importance\_surprise & responsibility \~{} 1 + importance + surprise &  11 & 0.9 [0.86, 0.97] & 9.1 [6.59, 13.91] \\ 
  baseline & wrongfulness \~{} 1 &  12 & 0 [0, 0] & 39.42 [32.97, 44.28] \\ 
  importance & wrongfulness \~{} 1 + importance &   4 & 0.45 [0.35, 0.68] & 33.22 [10.56, 37.99] \\ 
  surprise & wrongfulness \~{} 1 + surprise &  32 & 0.61 [0.36, 0.88] & 32.08 [18, 40.97] \\ 
  importance\_surprise & wrongfulness \~{} 1 + importance + surprise &   1 & 0.53 [0.53, 0.53] & 8.87 [8.87, 8.87] \\ 
   \bottomrule
\end{tabular}
\end{table}
# NOTE(review): summarize() is called with no arguments, so this returns a
# 1 x 0 tibble (as the printed output below shows) — this looks like leftover
# exploratory code; perhaps a count or summary of the baseline-best
# participants was intended.
df.exp4.responsibility_individual_fit %>% 
  select(where(~ !is.list(.))) %>% 
  filter(best_model == "baseline") %>% 
  summarize()
# A tibble: 1 x 0

6.4.5 Scenarios

# Table of the policy descriptions, with the trial id shown as a character
# "scenario" column.
df.exp4.trialinfo %>% 
  select(scenario = trial, policy) %>% 
  mutate(scenario = as.character(scenario)) %>% 
  print_table()
scenario policy
1 introduce corporal punishment in schools
2 introduce the death penalty for minor crimes (e.g. theft)
3 introduce prison sentences of at least 5 years for doctors who practice abortion
4 increase sales taxes on meat products (e.g. sausages or burgers) by 3 percent
5 Exempt all politicians from paying taxes
6 introduce tax breaks for dog owners
7 introducing a law that makes it illegal for people with disabilities to get a job
8 introduce a fine of at least $200 for littering in public places
9 ban children who are not vaccinated against Varicella from going to school
10 change the color of all government buildings to red
11 declare that sexual abuse within marriage does not count as a criminal liability
12 legally require every citizen to get vaccinated against the flu
13 change the legal voting age to 35 years
14 allow all citizens over the age of 21 to purchase a gun without a license
15 criminalize same-sex relationships
16 make it easier for landlords to increase rent
17 decriminalizing corruption for politicians
18 change the standard size of public trashcans from 33 gallons to 60 gallons
19 introduce prison sentences for extramarital affairs
20 changing the font of government docs to Arial
21 legalize gambling in casinos
22 make it mandatory to keep dogs on leash at all times in public
23 lie to citizens of the country about an oil leak that is poisening the country’s drinking water
24 requiring every citizen older than 12 to have their own social media profile for official identification purposes
# seed makes the bootstrapped confidence intervals below reproducible
set.seed(1)

# Scenario table sorted by mean badness, with bootstrapped 95% CIs from
# Hmisc::smean.cl.boot, formatted as "mean [low, high]".
df.exp4.trialinfo %>% 
  select(trial, policy, votes) %>% 
  left_join(df.exp4.long %>% 
              filter(question == "badness") %>% 
              group_by(trial) %>% 
              # smean.cl.boot() returns 3 values (Mean, Lower, Upper) per
              # trial; the recycled name vector relies on that order
              summarize(value = round(smean.cl.boot(rating), 2)) %>% 
              mutate(name = c("mean", "low", "high")) %>% 
              pivot_wider() %>% 
              mutate(badness = str_c(mean, " [", low, ", ", high, "]")),
            by = "trial") %>% 
  arrange(mean) %>% 
  mutate(votes = as.character(votes),
         index = 1:n()) %>% 
  select(index, policy, badness, votes) %>% 
  print_table("latex")

6.4.6 Averaged ratings across the scenarios

# Mean ratings per scenario, each formatted as "mean [low, high]".
df.exp4.regression.means %>% 
  # convert trial/votes to factors first so the numeric rounding below
  # doesn't touch them
  mutate(across(c(trial, votes), ~as.factor(.))) %>% 
  mutate(across(where(is.numeric), ~ round(., 2))) %>% 
  mutate(badness = str_c(badness, " [", badness_low, ", ", badness_high, "]"),
         importance = str_c(importance, " [", importance_low, ", ", importance_high, "]"),
         surprise = str_c(surprise, " [", surprise_low, ", ", surprise_high, "]"),
         responsibility = str_c(responsibility, " [", responsibility_low, ", ", responsibility_high, "]"),
         wrongfulness = str_c(wrongfulness, " [", wrongfulness_low, ", ", wrongfulness_high, "]")) %>% 
  select(scenario = trial, votes, badness, importance, surprise, responsibility, wrongfulness) %>% 
  # left_join(df.exp4.trialinfo %>% 
  #             select(trial, policy),
  #           by = "trial") %>% 
  # relocate(policy, .after = trial) %>% 
  print_table()
scenario votes badness importance surprise responsibility wrongfulness
1 2 83.64 [76.34, 90.16] 90.14 [86, 93.94] 65.2 [58.22, 72.18] 87.32 [81.7, 92.16] 83.02 [76.26, 89.16]
2 3 96.4 [93.48, 98.68] 61.56 [55.86, 67.06] 73.92 [66.32, 81.58] 69.46 [63.34, 75.4] 95.34 [92.2, 98]
3 3 73.58 [63.98, 83.2] 59.72 [52.16, 66.8] 54.46 [46.02, 62.7] 63.56 [56.56, 70.06] 72.68 [62.2, 83]
4 5 40.1 [32.02, 48.68] 36 [28.66, 44.06] 37.36 [29.96, 44.84] 42.48 [35.08, 50.02] 35.94 [27.08, 45.76]
5 5 88.64 [81.16, 94.9] 37.32 [29.48, 45.36] 31.94 [21.16, 42.26] 51.8 [43.02, 60.24] 86.4 [77.88, 93.7]
6 3 37.18 [28.68, 45.92] 41.26 [34.6, 47.88] 35.88 [29.82, 41.86] 57.64 [52.3, 62.84] 28.92 [21.4, 36.54]
7 5 95.86 [91.16, 99.08] 45.22 [36.02, 54.76] 72.2 [63.74, 80.92] 55.78 [47.14, 63.94] 94.68 [90.04, 98.24]
8 4 16.1 [10.5, 22.94] 43.9 [36.62, 51.48] 18.9 [13.66, 24.46] 48.94 [42.58, 56.04] 13.24 [8.62, 18.52]
9 3 45.6 [35.74, 55.76] 54.62 [48.68, 60.48] 37.6 [30.76, 44.34] 57.78 [50.5, 64.38] 41.56 [32.68, 50.03]
10 2 63.58 [52.92, 74.72] 85.58 [79.16, 90.98] 55.66 [47.7, 63.6] 85.52 [80.74, 90.12] 20.44 [13.68, 28.42]
11 4 95.64 [92.26, 98.5] 50.08 [41.88, 57.98] 66.64 [55.99, 75.52] 60.06 [51.84, 67.22] 92.02 [86.72, 96.1]
12 4 49.2 [38.98, 59.24] 46.66 [39.24, 55.26] 36.44 [29.22, 44.62] 52.82 [45.08, 60.64] 48.58 [38.2, 59.34]
13 5 75.56 [66.68, 83.62] 37.68 [29.28, 46.26] 53.66 [44.14, 63.02] 47.62 [39.1, 56.02] 79.3 [71.7, 86.56]
14 3 73.96 [65.59, 81.98] 50.64 [42.84, 57.95] 55.7 [47.23, 63.6] 60.54 [53.56, 67.62] 72.58 [64, 80.44]
15 5 82.12 [72.48, 90.96] 39.82 [31.58, 48.82] 61.52 [51.66, 71.18] 53.62 [45.22, 61.82] 81.3 [72.12, 90.26]
16 2 69.8 [61.92, 77.28] 90.48 [87.04, 93.7] 52.16 [45.18, 60.13] 89.14 [84.9, 93.18] 71.18 [63.12, 78.8]
17 4 89.04 [82.08, 94.9] 49.06 [41.62, 57.32] 42.7 [32.76, 52.9] 55.68 [47.78, 63.38] 84.22 [75.98, 91.32]
18 5 10.76 [6.2, 15.94] 28.28 [20.92, 36.42] 18.38 [11.96, 25.9] 40.04 [31.74, 48.86] 11.04 [5.58, 17.18]
19 2 75.86 [67.18, 83.4] 87.54 [82.24, 92.02] 62.86 [54.5, 71.1] 86.72 [81.42, 91.04] 71.42 [62.12, 79.76]
20 2 13.28 [7, 19.64] 90 [86.1, 93.8] 43.3 [34.48, 51.16] 88.3 [83.86, 92.26] 11.3 [6.04, 17.22]
21 2 25.96 [18.48, 33.64] 86.12 [81.18, 90.48] 33.94 [26.48, 41.18] 86.76 [82.58, 90.92] 24.06 [17.16, 30.94]
22 3 18.14 [12.1, 25.38] 52.16 [45.18, 59.49] 25.96 [20.14, 31.66] 55.74 [49.58, 61.98] 14.76 [9.48, 20.32]
23 4 97.56 [95.24, 99.3] 50.82 [42.88, 59.22] 67.66 [58.52, 76.74] 59.66 [52.04, 67.43] 96.18 [92.58, 98.76]
24 2 81.96 [73.78, 89.46] 88.16 [83.56, 92.14] 64.28 [55.76, 73.18] 87.58 [82.69, 92] 81.34 [72.72, 88.72]

6.5 Plots

6.5.1 1. Votes predict importance

# seed makes the jitter and bootstrapped means reproducible
set.seed(1)

# Importance ratings as a function of the number of votes in favor
df.plot = df.exp4.long %>% 
  filter(question == "importance")

ggplot(data = df.plot,
       mapping = aes(x = votes,
                     y = rating)) + 
  # raw ratings, jittered horizontally to reduce overplotting
  geom_point(alpha = 0.05,
             position = position_jitter(width = 0.05, height = 0)) +
  # linear trend (red)
  geom_smooth(method = "lm",
              se = FALSE,
              formula = y ~ x,
              color = "red",
              fill = "red",
              size = 2) + 
  # inverse trend 1/(votes - 1) (blue)
  geom_smooth(method = "lm",
              se = FALSE,
              formula = y ~ I(1/(x - 1)),
              color = "blue",
              fill = "blue",
              size = 2) + 
  # condition means with bootstrapped 95% CIs
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               shape = 21,
               fill = "white") +
  labs(y = "importance rating",
       x = "number of votes in favor") + 
  theme(text = element_text(size = 24),
        panel.grid.major.y = element_line())

# ggsave("../../figures/plots/experiment4_votes_importance.pdf",
#        width = 8,
#        height = 4)

6.5.2 2. Badness predicts surprise

set.seed(1)

# df.plot = df.exp4.regression.means

# column names accessed via the .data pronoun in the plot below
x = "badness"
y = "surprise"

# posterior predictions of surprise from badness (fixed effects only)
df.plot = fit.brm_exp4_surprise_badness %>%
  fitted(newdata = df.exp4.regression.means,
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp4.regression.means)

# scatter plot 
ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]])) + 
  # model prediction with 95% credible ribbon (stat = "identity" plots the
  # precomputed estimate/q2_5/q97_5 columns directly)
  geom_smooth(mapping = aes(y = estimate,
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              alpha = 0.4,
              color = "lightblue",
              fill = "lightblue") + 
  # identity line for reference
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # horizontal and vertical 95% CIs of the scenario means
  geom_linerange(mapping = aes(xmin = .data[[str_c(x, "_low")]],
                               xmax = .data[[str_c(x, "_high")]]),
                 alpha = 0.2) + 
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.2) + 
  geom_point(size = 2) +
  # annotate correlation and rmse between the two measures
  annotate(geom = "text",
           label = df.plot %>%
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>%
             mutate(across(.fns = ~ round(., 2))) %>%
             unlist() %>%
             str_c(names(.), " = ", .),
           x = c(0, 0),
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24))

# ggsave("../../figures/plots/experiment4_badness_surprise.pdf",
#        width = 5,
#        height = 5)

6.5.3 3. Importance and surprise predict responsibility

set.seed(1)

# Model predictions of responsibility from importance and surprise
# (fixed effects only)
df.plot = fit.brm_exp4_responsibility_importance_surprise %>% 
  fitted(newdata = df.exp4.regression.means,
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp4.regression.means)

x = "estimate"  
y = "responsibility"

# Observed vs. predicted scenario means with 95% credible ribbon
ggplot(data = df.plot,
       mapping = aes(x = .data[[x]],
                     y = .data[[y]])) +
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  geom_smooth(mapping = aes(y = estimate,
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # aes() with the .data pronoun replaces the deprecated aes_string(),
  # matching the other plots in this file
  geom_linerange(mapping = aes(ymin = .data[[str_c(y, "_low")]],
                               ymax = .data[[str_c(y, "_high")]]),
                 alpha = 0.2) +
  geom_point(size = 2,
             alpha = 1) +
  # annotate correlation and rmse between predictions and observations
  annotate(geom = "text",
           label = df.plot %>% 
             summarize(r = cor(.data[[x]], .data[[y]]),
                       rmse = rmse(.data[[x]], .data[[y]])) %>% 
             mutate(across(.fns = ~ round(., 2))) %>% 
             unlist() %>% 
             str_c(names(.), " = ", .),
           x = c(0, 0), 
           y = c(100, 90),
           size = 7,
           hjust = 0) + 
  labs(x = "surprise & importance",
       y = y) +
  scale_x_continuous(breaks = seq(0, 100, 25)) +
  scale_y_continuous(breaks = seq(0, 100, 25)) +
  coord_cartesian(xlim = c(0, 100),
                  ylim = c(0, 100)) +
  theme(text = element_text(size = 24))

# ggsave("../../figures/plots/experiment4_importance_surprise_responsibility.pdf",
#        width = 5,
#        height = 5)

6.5.4 4. Moral badness interaction

# Predicted responsibility as a function of importance, separately for the
# two badness conditions (marginal effects via ggeffects).
ggpredict(fit.brm_exp4_responsibility_badness_dummy,
          terms = c("importance", "badness_dummy")) %>% 
  plot() +
  theme_classic() +
  labs(x = "importance", 
       y = "responsibility") + 
  scale_color_manual(labels = c("bad", "neutral"),
                     values = c("red", "blue")) +
  labs(color = "badness") + 
  scale_y_continuous(breaks = seq(0, 100, 25),
                     limits = c(0, 100)) + 
  theme(text = element_text(size = 24),
        plot.title = element_blank(),
        # place the legend inside the panel, top left
        legend.position = c(0, 1),
        legend.justification = c(-0.25, 1)) +
  guides(color = guide_legend(override.aes = list(fill = NA)))

# ggsave("../../figures/plots/experiment4_responsibility_importance_dummy.pdf",
#        width = 5,
#        height = 5)

# Predicted responsibility as a function of surprise, separately for the
# two badness conditions (marginal effects via ggeffects).
ggpredict(fit.brm_exp4_responsibility_badness_dummy,
          terms = c("surprise", "badness_dummy")) %>% 
  plot() +
  theme_classic() +
  labs(x = "surprise", 
       y = "responsibility") + 
  scale_color_manual(labels = c("bad", "neutral"),
                     values = c("red", "blue")) +
  labs(color = "badness") + 
  scale_y_continuous(breaks = seq(0, 100, 25),
                     limits = c(0, 100)) + 
  theme(text = element_text(size = 24),
        plot.title = element_blank(),
        # place the legend inside the panel, top left
        legend.position = c(0, 1),
        legend.justification = c(-0.25, 1)) +
  guides(color = guide_legend(override.aes = list(fill = NA)))
Note: uncertainty of error terms are not taken into account. You may want to use `rstantools::posterior_predict()`.
Scale for 'colour' is already present. Adding another scale for 'colour',
which will replace the existing scale.
Scale for 'y' is already present. Adding another scale for 'y', which will
replace the existing scale.
# ggsave("../../figures/plots/experiment4_responsibility_surprise_dummy.pdf",
#        width = 5,
#        height = 5)

Posterior distributions of the interaction terms.

# Posterior distribution of the difference in importance slopes between the
# badness conditions (consecutive contrast); draws > 0 are highlighted.
# NOTE(review): stat() is superseded by after_stat() in newer ggplot2 —
# consider updating when the package version allows.
fit.brm_exp4_responsibility_badness_dummy %>% 
  emtrends(specs = "badness_dummy",
           var = c("importance")) %>% 
  contrast(interaction = "consec") %>% 
  gather_emmeans_draws() %>% 
  ggplot(data = .,
         mapping = aes(x = .value,
                       fill = stat(x > 0))) + 
  stat_halfeye(show.legend = FALSE) + 
  geom_vline(xintercept = 0,
             linetype = 2) + 
  scale_fill_manual(values = c("gray80", "lightblue")) +
  labs(title = "Difference in slopes for importance ratings",
       subtitle = "We correctly predicted a positive difference in slopes.")

# Posterior distribution of the difference in surprise slopes between the
# badness conditions (consecutive contrast); draws < 0 are highlighted.
fit.brm_exp4_responsibility_badness_dummy %>% 
  emtrends(specs = "badness_dummy",
           var = c("surprise")) %>% 
  contrast(interaction = "consec") %>% 
  gather_emmeans_draws() %>% 
  ggplot(data = .,
         mapping = aes(x = .value,
                       fill = stat(x < 0))) + 
  stat_halfeye(show.legend = FALSE) + 
  geom_vline(xintercept = 0,
             linetype = 2) + 
  scale_fill_manual(values = c("gray80", "lightblue")) + 
  labs(title = "Difference in slopes for surprise ratings",
       subtitle = "We incorrectly predicted a negative difference in slopes.")

6.5.5 5. Importance matters more for responsibility

6.5.5.1 Responsibility

set.seed(1)

# Model predictions (fixed effects only) for the scaled condition means,
# combined with the observed means they are predicting
df.plot = fit.brm_exp4_responsibility_importance_surprise_scaled %>% 
  fitted(newdata = df.exp4.regression.scaled.means,
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp4.regression.scaled.means)

# which rating goes on the y-axis
y = "responsibility"

# column names that hold the lower/upper bounds of the observed means
y_low = str_c(y, "_low")
y_high = str_c(y, "_high")

ggplot(data = df.plot,
       mapping = aes(x = estimate,
                     y = .data[[y]])) +
  # identity line: points on this line are predicted perfectly
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # model predictions with their 95% interval as a ribbon
  geom_smooth(mapping = aes(y = estimate,
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # uncertainty of the observed means; the .data pronoun replaces the
  # deprecated aes_string() and matches the style used above
  geom_linerange(mapping = aes(ymin = .data[[y_low]],
                               ymax = .data[[y_high]]),
                 alpha = 0.2) +
  geom_point(size = 2) +
  # annotate correlation and root mean squared error between the
  # model predictions and the observed means
  annotate(geom = "text",
           label = df.plot %>%
             summarize(r = cor(.data[["estimate"]], .data[[y]]),
                       rmse = rmse(.data[["estimate"]], .data[[y]])) %>%
             mutate(across(.cols = everything(),
                           .fns = ~ round(., 2))) %>%
             unlist() %>%
             str_c(names(.), " = ", .),
           x = c(-1.1, -1.1),
           y = c(1, 0.8),
           size = 7,
           hjust = 0) +
  labs(x = "surprise & importance",
       y = y) +
  scale_x_continuous(breaks = seq(-1, 1, 0.5)) +
  scale_y_continuous(breaks = seq(-1, 1, 0.5)) +
  coord_cartesian(xlim = c(-1.25, 1.25),
                  ylim = c(-1.25, 1.25)) +
  theme(text = element_text(size = 24))

# ggsave("../../figures/plots/experiment4_responsibility_surprise_importance_scaled.pdf",
#        width = 5,
#        height = 5)

6.5.5.2 Wrongfulness

set.seed(1)

# Model predictions (fixed effects only) for the scaled condition means,
# combined with the observed means they are predicting
df.plot = fit.brm_exp4_wrongfulness_importance_surprise_scaled %>% 
  fitted(newdata = df.exp4.regression.scaled.means,
         re_formula = NA) %>% 
  as_tibble() %>% 
  clean_names() %>%
  bind_cols(df.exp4.regression.scaled.means)

# which rating goes on the y-axis
y = "wrongfulness"

# column names that hold the lower/upper bounds of the observed means
y_low = str_c(y, "_low")
y_high = str_c(y, "_high")

ggplot(data = df.plot,
       mapping = aes(x = estimate,
                     y = .data[[y]])) +
  # identity line: points on this line are predicted perfectly
  geom_abline(intercept = 0,
              slope = 1,
              linetype = 2) +
  # model predictions with their 95% interval as a ribbon
  geom_smooth(mapping = aes(y = estimate,
                            ymin = q2_5,
                            ymax = q97_5),
              stat = "identity",
              color = "lightblue",
              alpha = 0.4,
              fill = "lightblue") +
  # uncertainty of the observed means; the .data pronoun replaces the
  # deprecated aes_string() and matches the style used above
  geom_linerange(mapping = aes(ymin = .data[[y_low]],
                               ymax = .data[[y_high]]),
                 alpha = 0.2) +
  geom_point(size = 2) +
  # annotate correlation and root mean squared error between the
  # model predictions and the observed means
  annotate(geom = "text",
           label = df.plot %>%
             summarize(r = cor(.data[["estimate"]], .data[[y]]),
                       rmse = rmse(.data[["estimate"]], .data[[y]])) %>%
             mutate(across(.cols = everything(),
                           .fns = ~ round(., 2))) %>%
             unlist() %>%
             str_c(names(.), " = ", .),
           x = c(-1.1, -1.1),
           y = c(1, 0.8),
           size = 7,
           hjust = 0) +
  labs(x = "surprise & importance",
       y = y) +
  scale_x_continuous(breaks = seq(-1, 1, 0.5)) +
  scale_y_continuous(breaks = seq(-1, 1, 0.5)) +
  coord_cartesian(xlim = c(-1.25, 1.25),
                  ylim = c(-1.25, 1.25)) +
  theme(text = element_text(size = 24))

# ggsave("../../figures/plots/experiment4_wrongfulness_surprise_importance_scaled.pdf",
#        width = 5,
#        height = 5)

6.5.6 Individual participant analysis

set.seed(1)

# Reshape individual-participant correlations into long format and turn
# the column names into readable labels,
# e.g. "r_pivotality_importance" -> "r(pivotality, importance)"
df.plot = df.exp4.individual.correlations %>% 
  pivot_longer(cols = -participant,
               names_to = "index",
               values_to = "correlation") %>% 
  mutate(index = str_replace(index, "r_", "r("),
         index = str_replace(index, "_", ", "),
         index = str_c(index, ")"),
         index = factor(index, levels = 
                          c("r(pivotality, importance)",
                            "r(badness, surprise)",
                            "r(surprise, responsibility)",
                            "r(importance, responsibility)",
                            "r(surprise, wrongfulness)",
                            "r(importance, wrongfulness)"))) %>% 
  drop_na()

# Bootstrapped mean and 95% CI of the correlations for each index.
# smean.cl.boot() returns a named vector (Mean, Lower, Upper), so keep
# it as a list-column and spread it into columns by name instead of
# relying on the row order of a multi-row summarize()
df.plot1 = df.plot %>% 
  group_by(index) %>% 
  summarize(value = list(smean.cl.boot(correlation))) %>% 
  unnest_wider(value) %>% 
  rename(mean = Mean,
         low = Lower,
         high = Upper)

ggplot(data = df.plot) + 
  # faint vertical reference lines
  geom_vline(xintercept = seq(-1, 1, 0.25),
             linetype = 2,
             alpha = 0.1) + 
  # distribution of individual-participant correlations
  geom_density(mapping = aes(x = correlation),
               fill = "lightblue",
               bw = 0.05) +
  # bootstrapped mean with 95% CI
  geom_pointrange(data = df.plot1,
                  mapping = aes(x = mean,
                                xmin = low,
                                xmax = high,
                                y = 0.3),
                  shape = 21, 
                  fill = "white",
                  size = 1) + 
  facet_wrap(~index, ncol = 1) + 
  scale_y_continuous(expand = expansion(mult = c(0, 0.1))) +
  theme(text = element_text(size = 16))

6.6 Misc

6.6.1 Votes

set.seed(1)

# order the questions and treat votes as a categorical variable
df.plot = df.exp4.long %>% 
  mutate(votes = as.factor(votes),
         question = factor(question,
                           levels = c("importance", "responsibility",
                                      "surprise", "badness", "wrongfulness")))

# mean rating (with bootstrapped CIs) per number of votes, colored by question
df.plot %>% 
  ggplot(mapping = aes(x = votes,
                       y = rating,
                       color = question)) + 
  stat_summary(fun.data = "mean_cl_boot",
               position = position_dodge(width = 0.5))

6.6.2 Votes, badness, responsibility

# focus on one question's ratings
y = "responsibility"

# keep only that question and treat the condition variables as factors
df.plot = df.exp4.long %>% 
  filter(question == y) %>% 
  mutate(bad = as.factor(bad),
         votes = as.factor(votes),
         trial = as.factor(trial))

# mean rating (with bootstrapped CIs) by number of votes, split by badness;
# individual ratings shown as faint jittered points
df.plot %>% 
  ggplot(mapping = aes(x = votes,
                       y = rating,
                       color = bad)) + 
  stat_summary(fun.data = "mean_cl_boot",
               position = position_dodge(width = 0.9)) + 
  geom_point(alpha = 0.05,
             position = position_jitterdodge(dodge.width = 0.9,
                                             jitter.width = 0.1,
                                             jitter.height = 0)) +
  # scale_color_brewer(palette = "Set1") + 
  theme(legend.position = "bottom") + 
  guides(color = guide_legend(nrow = 1))

# attach the trial information to the per-trial mean responsibility ratings
left_join(x = df.exp4.regression.means %>% 
            select(trial, bad, votes, responsibility),
          y = df.exp4.trialinfo,
          by = c("trial", "bad", "votes"))
# A tibble: 24 x 6
   trial   bad votes responsibility trial_label policy                          
   <dbl> <dbl> <dbl>          <dbl>       <dbl> <chr>                           
 1     1     6     2           87.3           1 introduce corporal punishment i…
 2     2     6     3           69.5           2 introduce the death penalty for…
 3     3     5     3           63.6           3 introduce prison sentences of a…
 4     4     2     5           42.5           4 increase sales taxes on meat pr…
 5     5     4     5           51.8           5 Exempt all politicians from pay…
 6     6     2     3           57.6           6 introduce tax breaks for dog ow…
 7     7     6     5           55.8           8 introducing a law that makes it…
 8     8     3     4           48.9           9 introduce a fine of at least $2…
 9     9     3     3           57.8          10 ban children who are not vaccin…
10    10     1     2           85.5          12 change the color of all governm…
# … with 14 more rows

6.6.3 Per trial

# treat the condition variables as factors for plotting
df.plot = df.exp4.long %>% 
  mutate(bad = as.factor(bad),
         votes = as.factor(votes),
         trial = as.factor(trial))

# mean ratings (with bootstrapped CIs) per trial, one facet per question;
# individual ratings shown as faint points
df.plot %>% 
  ggplot(mapping = aes(x = trial,
                       y = rating,
                       group = question,
                       shape = votes,
                       color = bad)) + 
  stat_summary(fun.data = "mean_cl_boot",
               position = position_dodge(width = 1)) + 
  geom_point(alpha = 0.1,
             position = position_dodge(width = 1)) + 
  facet_wrap(~ question, ncol = 1)

6.7 Session info

Information about this R session including which version of R was used, and what packages were loaded.

sessionInfo()
R version 4.0.3 (2020-10-10)
Platform: x86_64-apple-darwin17.0 (64-bit)
Running under: macOS Big Sur 10.16

Matrix products: default
BLAS:   /Library/Frameworks/R.framework/Versions/4.0/Resources/lib/libRblas.dylib
LAPACK: /Library/Frameworks/R.framework/Versions/4.0/Resources/lib/libRlapack.dylib

locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

other attached packages:
 [1] forcats_0.5.1     stringr_1.4.0     dplyr_1.0.4       purrr_0.3.4      
 [5] readr_1.4.0       tidyr_1.1.2       tibble_3.0.6      tidyverse_1.3.0  
 [9] patchwork_1.1.1   corrr_0.4.3       kableExtra_1.3.1  xtable_1.8-4     
[13] ggeffects_1.0.1   emmeans_1.5.3     tidybayes_2.3.1   brms_2.14.4      
[17] Rcpp_1.0.6        broom.mixed_0.2.6 Hmisc_4.4-2       ggplot2_3.3.3    
[21] Formula_1.2-4     survival_3.2-7    lattice_0.20-41   DT_0.17          
[25] janitor_2.1.0     lsr_0.5           lubridate_1.7.9.2 knitr_1.31       
[29] tidyjson_0.3.1    RSQLite_2.2.3    

loaded via a namespace (and not attached):
  [1] utf8_1.1.4           tidyselect_1.1.0     lme4_1.1-26         
  [4] htmlwidgets_1.5.3    grid_4.0.3           munsell_0.5.0       
  [7] codetools_0.2-18     statmod_1.4.35       miniUI_0.1.1.1      
 [10] withr_2.4.1          Brobdingnag_1.2-6    colorspace_2.0-0    
 [13] highr_0.8            rstudioapi_0.13      stats4_4.0.3        
 [16] bayesplot_1.8.0      labeling_0.4.2       rstan_2.21.1        
 [19] bit64_4.0.5          farver_2.1.0         bridgesampling_1.0-0
 [22] coda_0.19-4          vctrs_0.3.6          generics_0.1.0      
 [25] TH.data_1.0-10       xfun_0.21            R6_2.5.0            
 [28] markdown_1.1         HDInterval_0.2.2     gamm4_0.2-6         
 [31] projpred_2.0.2       cachem_1.0.1         assertthat_0.2.1    
 [34] promises_1.1.1       scales_1.1.1         multcomp_1.4-15     
 [37] nnet_7.3-15          gtable_0.3.0         processx_3.4.5      
 [40] sandwich_3.0-0       rlang_0.4.10         splines_4.0.3       
 [43] TMB_1.7.18           broom_0.7.3          checkmate_2.0.0     
 [46] inline_0.3.17        yaml_2.2.1           reshape2_1.4.4      
 [49] abind_1.4-5          modelr_0.1.8         threejs_0.3.3       
 [52] crosstalk_1.1.1      backports_1.2.1      httpuv_1.5.5        
 [55] rsconnect_0.8.16     tools_4.0.3          bookdown_0.21       
 [58] ellipsis_0.3.1       RColorBrewer_1.1-2   ggridges_0.5.3      
 [61] plyr_1.8.6           base64enc_0.1-3      ps_1.6.0            
 [64] prettyunits_1.1.1    rpart_4.1-15         zoo_1.8-8           
 [67] haven_2.3.1          cluster_2.1.0        fs_1.5.0            
 [70] magrittr_2.0.1       data.table_1.13.6    ggdist_2.4.0        
 [73] colourpicker_1.1.0   reprex_1.0.0         mvtnorm_1.1-1       
 [76] matrixStats_0.57.0   hms_1.0.0            shinyjs_2.0.0       
 [79] mime_0.10            evaluate_0.14        arrayhelpers_1.1-0  
 [82] shinystan_2.5.0      jpeg_0.1-8.1         readxl_1.3.1        
 [85] gridExtra_2.3        rstantools_2.1.1     compiler_4.0.3      
 [88] V8_3.4.0             crayon_1.4.1         minqa_1.2.4         
 [91] StanHeaders_2.21.0-7 htmltools_0.5.1.1    mgcv_1.8-33         
 [94] later_1.1.0.1        RcppParallel_5.0.2   DBI_1.1.1           
 [97] sjlabelled_1.1.7     dbplyr_2.0.0         MASS_7.3-53         
[100] boot_1.3-26          Matrix_1.3-2         cli_2.3.0           
[103] parallel_4.0.3       insight_0.13.1.1     igraph_1.2.6        
[106] pkgconfig_2.0.3      foreign_0.8-81       xml2_1.3.2          
[109] svUnit_1.0.3         dygraphs_1.1.1.6     webshot_0.5.2       
[112] estimability_1.3     rvest_0.3.6          snakecase_0.11.0    
[115] distributional_0.2.1 callr_3.5.1          digest_0.6.27       
[118] rmarkdown_2.6        cellranger_1.1.0     htmlTable_2.1.0     
[121] curl_4.3             shiny_1.6.0          gtools_3.8.2        
[124] rjson_0.2.20         nloptr_1.2.2.2       lifecycle_1.0.0     
[127] nlme_3.1-151         jsonlite_1.7.2       viridisLite_0.3.0   
[130] fansi_0.4.2          pillar_1.4.7         loo_2.4.1.9000      
[133] fastmap_1.1.0        httr_1.4.2           pkgbuild_1.2.0      
[136] glue_1.4.2           xts_0.12.1           png_0.1-7           
[139] shinythemes_1.2.0    bit_4.0.4            stringi_1.5.3       
[142] blob_1.2.1           latticeExtra_0.6-29  memoise_2.0.0